summaryrefslogtreecommitdiffstats
path: root/hw
diff options
context:
space:
mode:
authorCédric Le Goater <clg@kaod.org>2019-02-25 11:09:51 +0100
committerStewart Smith <stewart@linux.ibm.com>2019-03-28 15:24:12 +1100
commitbbcbbd3e071ffe654596ce19ddf8d99b4176bbc3 (patch)
tree613067df22ff1fee86846187b04e224bef609fd0 /hw
parent84aa25921a09c7c6b703cf56a9396e2d55d49223 (diff)
downloadtalos-skiboot-bbcbbd3e071ffe654596ce19ddf8d99b4176bbc3.tar.gz
talos-skiboot-bbcbbd3e071ffe654596ce19ddf8d99b4176bbc3.zip
xive: Add calls to save/restore the queues and VPs HW state
To be able to support migration of guests using the XIVE native exploitation mode, (where the queue is effectively owned by the guest), KVM needs to be able to save and restore the HW-modified fields of the queue, such as the current queue producer pointer and generation bit, and to retrieve the modified thread context registers of the VP from the NVT structure: the VP interrupt pending bits. However, there is no need to set back the NVT structure on P9. P10 should be the same. Based on previous work from BenH. Signed-off-by: Cédric Le Goater <clg@kaod.org> Signed-off-by: Stewart Smith <stewart@linux.ibm.com>
Diffstat (limited to 'hw')
-rw-r--r--hw/xive.c130
1 file changed, 130 insertions, 0 deletions
diff --git a/hw/xive.c b/hw/xive.c
index b863b634..f3822652 100644
--- a/hw/xive.c
+++ b/hw/xive.c
@@ -1397,11 +1397,21 @@ static int64_t xive_ivc_scrub(struct xive *x, uint64_t block, uint64_t idx)
return __xive_cache_scrub(x, xive_cache_ivc, block, idx, false, false);
}
+/*
+ * Write back the VPC (virtual processor context) cache entry for
+ * block/idx so the NVT structure in memory reflects HW-modified
+ * state. Both trailing flags are false, unlike xive_vpc_scrub_clean
+ * below which sets the first of them -- see __xive_cache_scrub for
+ * the exact flag semantics (not visible in this hunk).
+ */
+static int64_t xive_vpc_scrub(struct xive *x, uint64_t block, uint64_t idx)
+{
+ return __xive_cache_scrub(x, xive_cache_vpc, block, idx, false, false);
+}
+
/*
 * Scrub the VPC cache entry with the extra "clean" flag set (true as
 * the fifth argument -- cf. xive_vpc_scrub above; exact semantics
 * are in __xive_cache_scrub).
 */
static int64_t xive_vpc_scrub_clean(struct xive *x, uint64_t block, uint64_t idx)
{
return __xive_cache_scrub(x, xive_cache_vpc, block, idx, true, false);
}
+/*
+ * Write back the EQC (event queue) cache entry for block/idx so the
+ * EQ structure in memory reflects HW-modified state (used below to
+ * read the current generation bit and queue index).
+ */
+static int64_t xive_eqc_scrub(struct xive *x, uint64_t block, uint64_t idx)
+{
+ return __xive_cache_scrub(x, xive_cache_eqc, block, idx, false, false);
+}
+
static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
uint64_t block, uint64_t idx,
uint32_t start_dword, uint32_t dword_count,
@@ -4177,6 +4187,85 @@ static int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio,
return rc;
}
+/*
+ * OPAL call: read back the HW-modified state of the event queue for
+ * (vp, prio) so a hypervisor can save it across guest migration: the
+ * generation (toggle) bit and the current queue index, both held in
+ * EQ word 1.
+ *
+ * Returns OPAL_WRONG_STATE if XIVE is not in exploitation mode or the
+ * EQ is not valid, OPAL_PARAMETER on bad arguments, the scrub failure
+ * code if the scrub fails, or OPAL_SUCCESS with *out_qtoggle and
+ * *out_qindex filled in.
+ */
+static int64_t opal_xive_get_queue_state(uint64_t vp, uint32_t prio,
+ uint32_t *out_qtoggle,
+ uint32_t *out_qindex)
+{
+ uint32_t blk, idx;
+ struct xive *x;
+ struct xive_eq *eq;
+ int64_t rc;
+
+ if (xive_mode != XIVE_MODE_EXPL)
+ return OPAL_WRONG_STATE;
+
+ if (!out_qtoggle || !out_qindex ||
+ !xive_eq_for_target(vp, prio, &blk, &idx))
+ return OPAL_PARAMETER;
+
+ x = xive_from_vc_blk(blk);
+ if (!x)
+ return OPAL_PARAMETER;
+
+ eq = xive_get_eq(x, idx);
+ if (!eq)
+ return OPAL_PARAMETER;
+
+ /* Scrub the queue so the in-memory EQ reflects the HW state */
+ lock(&x->lock);
+ rc = xive_eqc_scrub(x, blk, idx);
+ unlock(&x->lock);
+ if (rc)
+ return rc;
+
+ /* We don't support disabled (invalid) queues */
+ if (!(eq->w0 & EQ_W0_VALID))
+ return OPAL_WRONG_STATE;
+
+ *out_qtoggle = GETFIELD(EQ_W1_GENERATION, eq->w1);
+ *out_qindex = GETFIELD(EQ_W1_PAGE_OFF, eq->w1);
+
+ return OPAL_SUCCESS;
+}
+
+/*
+ * OPAL call: restore a previously saved queue toggle (generation) bit
+ * and queue index into EQ word 1 for (vp, prio), pushing the update
+ * through the EQ cache watch. Counterpart of
+ * opal_xive_get_queue_state, used when restoring a migrated guest.
+ *
+ * Returns OPAL_WRONG_STATE if XIVE is not in exploitation mode or the
+ * EQ is not valid, OPAL_PARAMETER on bad ids, otherwise the cache
+ * update result.
+ */
+static int64_t opal_xive_set_queue_state(uint64_t vp, uint32_t prio,
+ uint32_t qtoggle, uint32_t qindex)
+{
+ uint32_t blk, idx;
+ struct xive *x;
+ struct xive_eq *eq, new_eq;
+ int64_t rc;
+
+ if (xive_mode != XIVE_MODE_EXPL)
+ return OPAL_WRONG_STATE;
+
+ if (!xive_eq_for_target(vp, prio, &blk, &idx))
+ return OPAL_PARAMETER;
+
+ x = xive_from_vc_blk(blk);
+ if (!x)
+ return OPAL_PARAMETER;
+
+ eq = xive_get_eq(x, idx);
+ if (!eq)
+ return OPAL_PARAMETER;
+
+ /* We don't support disabled (invalid) queues */
+ if (!(eq->w0 & EQ_W0_VALID))
+ return OPAL_WRONG_STATE;
+
+ /* Work on a local copy; HW is only updated via the watch below */
+ new_eq = *eq;
+
+ new_eq.w1 = SETFIELD(EQ_W1_GENERATION, new_eq.w1, qtoggle);
+ new_eq.w1 = SETFIELD(EQ_W1_PAGE_OFF, new_eq.w1, qindex);
+
+ lock(&x->lock);
+ /* Write dwords 0..3 of the EQ through the cache watch facility */
+ rc = xive_eqc_cache_update(x, blk, idx, 0, 4, &new_eq, false, false);
+ unlock(&x->lock);
+
+ return rc;
+}
+
static int64_t opal_xive_donate_page(uint32_t chip_id, uint64_t addr)
{
struct proc_chip *c = get_chip(chip_id);
@@ -4415,6 +4504,44 @@ bail:
return rc;
}
+/*
+ * OPAL call: read back the saved HW thread context of a VP for guest
+ * migration. Scrubs the VPC cache entry so the NVT structure in
+ * memory is current, then packs VP word 4 into the high 32 bits and
+ * word 5 into the low 32 bits of *out_state.
+ *
+ * Returns OPAL_PARAMETER on bad arguments or group VP ids,
+ * OPAL_WRONG_STATE if the VP is not valid, the scrub failure code if
+ * the scrub fails, otherwise OPAL_SUCCESS.
+ */
+static int64_t opal_xive_get_vp_state(uint64_t vp_id, uint64_t *out_state)
+{
+ struct xive *x;
+ struct xive_vp *vp;
+ uint32_t blk, idx;
+ int64_t rc;
+ bool group;
+
+ if (!out_state || !xive_decode_vp(vp_id, &blk, &idx, NULL, &group))
+ return OPAL_PARAMETER;
+ /* Group VPs are not supported here */
+ if (group)
+ return OPAL_PARAMETER;
+ x = xive_from_pc_blk(blk);
+ if (!x)
+ return OPAL_PARAMETER;
+ vp = xive_get_vp(x, idx);
+ if (!vp)
+ return OPAL_PARAMETER;
+
+ /* Scrub the vp so the in-memory NVT reflects the HW state */
+ lock(&x->lock);
+ rc = xive_vpc_scrub(x, blk, idx);
+ unlock(&x->lock);
+ if (rc)
+ return rc;
+
+ if (!(vp->w0 & VP_W0_VALID))
+ return OPAL_WRONG_STATE;
+
+ /*
+ * Return word4 and word5 which contain the saved HW thread
+ * context. The IPB register is all we care for now on P9.
+ */
+ *out_state = (((uint64_t)vp->w4) << 32) | vp->w5;
+
+ return OPAL_SUCCESS;
+}
+
static void xive_cleanup_cpu_tima(struct cpu_thread *c)
{
struct xive_cpu_state *xs = c->xstate;
@@ -5336,5 +5463,8 @@ void init_xive(void)
opal_register(OPAL_XIVE_SET_VP_INFO, opal_xive_set_vp_info, 3);
opal_register(OPAL_XIVE_SYNC, opal_xive_sync, 2);
opal_register(OPAL_XIVE_DUMP, opal_xive_dump, 2);
+ opal_register(OPAL_XIVE_GET_QUEUE_STATE, opal_xive_get_queue_state, 4);
+ opal_register(OPAL_XIVE_SET_QUEUE_STATE, opal_xive_set_queue_state, 4);
+ opal_register(OPAL_XIVE_GET_VP_STATE, opal_xive_get_vp_state, 2);
}
OpenPOWER on IntegriCloud