From 4d5f2c04c8a462a1c7ad3fdc138515c84fa65379 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 9 Feb 2017 17:15:41 +0100 Subject: KVM: s390: log runtime instrumentation enablement We handle runtime instrumentation enablement either lazy or via sync_regs on migration. Make sure to add a debug log entry for that per CPU on the first occurrence. Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 14 ++++++++------ arch/s390/kvm/priv.c | 1 + 2 files changed, 9 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index fd6cd05bb6a7..5fda4686e817 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2719,6 +2719,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { + struct runtime_instr_cb *riccb; + + riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) @@ -2747,12 +2750,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) * we should enable RI here instead of doing the lazy enablement. */ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && - test_kvm_facility(vcpu->kvm, 64)) { - struct runtime_instr_cb *riccb = - (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; - - if (riccb->valid) - vcpu->arch.sie_block->ecb3 |= 0x01; + test_kvm_facility(vcpu->kvm, 64) && + riccb->valid && + !(vcpu->arch.sie_block->ecb3 & 0x01)) { + VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)"); + vcpu->arch.sie_block->ecb3 |= 0x01; } save_access_regs(vcpu->arch.host_acrs); restore_access_regs(vcpu->run->s.regs.acrs); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 64b6a309f2c4..93d6cde8c3ea 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -37,6 +37,7 @@ static int handle_ri(struct kvm_vcpu *vcpu) { if (test_kvm_facility(vcpu->kvm, 64)) { + VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)"); vcpu->arch.sie_block->ecb3 |= 0x01; kvm_s390_retry_instr(vcpu); return 0; -- cgit v1.2.1 From c0a6bfdc18b83290b65372a7687134052f382bdf Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 27 Feb 2017 21:14:47 +0100 Subject: KVM: s390: Handle sthyi also for instruction intercept Right now we handle the STHYI only via the operation exception intercept (illegal instruction). If hardware ever decides to provide an instruction intercept for STHYI, we should handle that as well. 
Signed-off-by: Christian Borntraeger --- arch/s390/kvm/intercept.c | 3 +-- arch/s390/kvm/priv.c | 1 + arch/s390/kvm/sthyi.c | 3 +++ 3 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 59920f96ebc0..0a1764655153 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -368,8 +368,7 @@ static int handle_operexc(struct kvm_vcpu *vcpu) trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa, vcpu->arch.sie_block->ipb); - if (vcpu->arch.sie_block->ipa == 0xb256 && - test_kvm_facility(vcpu->kvm, 74)) + if (vcpu->arch.sie_block->ipa == 0xb256) return handle_sthyi(vcpu); if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0) diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 93d6cde8c3ea..26f30bab9c2f 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -760,6 +760,7 @@ static const intercept_handler_t b2_handlers[256] = { [0x3b] = handle_io_inst, [0x3c] = handle_io_inst, [0x50] = handle_ipte_interlock, + [0x56] = handle_sthyi, [0x5f] = handle_io_inst, [0x74] = handle_io_inst, [0x76] = handle_io_inst, diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c index 05c98bb853cf..926b5244263e 100644 --- a/arch/s390/kvm/sthyi.c +++ b/arch/s390/kvm/sthyi.c @@ -404,6 +404,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu) u64 code, addr, cc = 0; struct sthyi_sctns *sctns = NULL; + if (!test_kvm_facility(vcpu->kvm, 74)) + return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); + /* * STHYI requires extensive locking in the higher hypervisors * and is very computational/memory expensive. Therefore we -- cgit v1.2.1 From 0c9d86833dfdafaf580cc49735661d002ef07ee3 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 13 Mar 2017 11:48:28 +0100 Subject: KVM: s390: use defines for execution controls Let's replace the bitmasks by defines. Reconstructed from code, comments and commit messages. Tried to keep the defines short and map them to feature names. In case they don't completely map to features, keep them in the stye of ICTL defines. This effectively drops all "U" from the existing numbers. I think this should be fine (as similarly done for e.g. ICTL defines). I am not 100% sure about the ECA_MVPGI and ECA_PROTEXCI bits as they are always used in pairs. 
Signed-off-by: David Hildenbrand Message-Id: <20170313104828.13362-1-david@redhat.com> Signed-off-by: Christian Borntraeger [some renames, add one missing place] --- arch/s390/include/asm/kvm_host.h | 18 +++++++++++++++++- arch/s390/kvm/gaccess.c | 6 +++--- arch/s390/kvm/kvm-s390.c | 36 ++++++++++++++++++------------------ arch/s390/kvm/kvm-s390.h | 2 +- arch/s390/kvm/priv.c | 2 +- arch/s390/kvm/vsie.c | 33 ++++++++++++++++----------------- 6 files changed, 56 insertions(+), 41 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index a41faf34b034..18e9c9d83b23 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -164,6 +164,13 @@ struct kvm_s390_sie_block { #define ICTL_RRBE 0x00001000 #define ICTL_TPROT 0x00000200 __u32 ictl; /* 0x0048 */ +#define ECA_CEI 0x80000000 +#define ECA_IB 0x40000000 +#define ECA_SIGPI 0x10000000 +#define ECA_MVPGI 0x01000000 +#define ECA_VX 0x00020000 +#define ECA_PROTEXCI 0x00002000 +#define ECA_SII 0x00000001 __u32 eca; /* 0x004c */ #define ICPT_INST 0x04 #define ICPT_PROGI 0x08 @@ -182,10 +189,18 @@ struct kvm_s390_sie_block { __u32 ipb; /* 0x0058 */ __u32 scaoh; /* 0x005c */ __u8 reserved60; /* 0x0060 */ +#define ECB_TE 0x10 +#define ECB_SRSI 0x04 +#define ECB_HOSTPROTINT 0x02 __u8 ecb; /* 0x0061 */ +#define ECB2_CMMA 0x80 +#define ECB2_IEP 0x20 +#define ECB2_PFMFI 0x08 +#define ECB2_ESCA 0x04 __u8 ecb2; /* 0x0062 */ -#define ECB3_AES 0x04 #define ECB3_DEA 0x08 +#define ECB3_AES 0x04 +#define ECB3_RI 0x01 __u8 ecb3; /* 0x0063 */ __u32 scaol; /* 0x0064 */ __u8 reserved68[4]; /* 0x0068 */ @@ -224,6 +239,7 @@ struct kvm_s390_sie_block { __u8 reserved1a4[20]; /* 0x01a4 */ __u64 cbrlo; /* 0x01b8 */ __u8 reserved1c0[8]; /* 0x01c0 */ +#define ECD_HOSTREGMGMT 0x20000000 __u32 ecd; /* 0x01c8 */ __u8 reserved1cc[18]; /* 0x01cc */ __u64 pp; /* 0x01de */ diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index d55c829a5944..709aca9ceb05 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -262,7 +262,7 @@ struct aste { int ipte_lock_held(struct kvm_vcpu *vcpu) { - if (vcpu->arch.sie_block->eca & 1) { + if (vcpu->arch.sie_block->eca & ECA_SII) { int rc; read_lock(&vcpu->kvm->arch.sca_lock); @@ -361,7 +361,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu) void ipte_lock(struct kvm_vcpu *vcpu) { - if (vcpu->arch.sie_block->eca & 1) + if (vcpu->arch.sie_block->eca & ECA_SII) ipte_lock_siif(vcpu); else ipte_lock_simple(vcpu); @@ -369,7 +369,7 @@ void ipte_lock(struct kvm_vcpu *vcpu) void ipte_unlock(struct kvm_vcpu *vcpu) { - if (vcpu->arch.sie_block->eca & 1) + if (vcpu->arch.sie_block->eca & ECA_SII) ipte_unlock_siif(vcpu); else ipte_unlock_simple(vcpu); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 5fda4686e817..976373c03138 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1646,7 +1646,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu) sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU; - vcpu->arch.sie_block->ecb2 |= 0x04U; + vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); } else { struct bsca_block *sca = vcpu->kvm->arch.sca; @@ -1700,7 +1700,7 @@ static int sca_switch_to_extended(struct kvm *kvm) kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) { vcpu->arch.sie_block->scaoh = scaoh; vcpu->arch.sie_block->scaol = scaol; - 
vcpu->arch.sie_block->ecb2 |= 0x04U; + vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; } kvm->arch.sca = new_sca; kvm->arch.use_esca = 1; @@ -1939,8 +1939,8 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) if (!vcpu->arch.sie_block->cbrlo) return -ENOMEM; - vcpu->arch.sie_block->ecb2 |= 0x80; - vcpu->arch.sie_block->ecb2 &= ~0x08; + vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; + vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI; return 0; } @@ -1970,28 +1970,28 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */ if (MACHINE_HAS_ESOP) - vcpu->arch.sie_block->ecb |= 0x02; + vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; if (test_kvm_facility(vcpu->kvm, 9)) - vcpu->arch.sie_block->ecb |= 0x04; + vcpu->arch.sie_block->ecb |= ECB_SRSI; if (test_kvm_facility(vcpu->kvm, 73)) - vcpu->arch.sie_block->ecb |= 0x10; + vcpu->arch.sie_block->ecb |= ECB_TE; if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi) - vcpu->arch.sie_block->ecb2 |= 0x08; + vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; if (test_kvm_facility(vcpu->kvm, 130)) - vcpu->arch.sie_block->ecb2 |= 0x20; - vcpu->arch.sie_block->eca = 0x1002000U; + vcpu->arch.sie_block->ecb2 |= ECB2_IEP; + vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; if (sclp.has_cei) - vcpu->arch.sie_block->eca |= 0x80000000U; + vcpu->arch.sie_block->eca |= ECA_CEI; if (sclp.has_ib) - vcpu->arch.sie_block->eca |= 0x40000000U; + vcpu->arch.sie_block->eca |= ECA_IB; if (sclp.has_siif) - vcpu->arch.sie_block->eca |= 1; + vcpu->arch.sie_block->eca |= ECA_SII; if (sclp.has_sigpif) - vcpu->arch.sie_block->eca |= 0x10000000U; + vcpu->arch.sie_block->eca |= ECA_SIGPI; if (test_kvm_facility(vcpu->kvm, 129)) { - vcpu->arch.sie_block->eca |= 0x00020000; - vcpu->arch.sie_block->ecd |= 0x20000000; + vcpu->arch.sie_block->eca |= ECA_VX; + vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; } vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; @@ -2752,9 +2752,9 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && test_kvm_facility(vcpu->kvm, 64) && riccb->valid && - !(vcpu->arch.sie_block->ecb3 & 0x01)) { + !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)"); - vcpu->arch.sie_block->ecb3 |= 0x01; + vcpu->arch.sie_block->ecb3 |= ECB3_RI; } save_access_regs(vcpu->arch.host_acrs); restore_access_regs(vcpu->run->s.regs.acrs); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index af9fa91a0c91..dfdcde125af6 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -25,7 +25,7 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); /* Transactional Memory Execution related macros */ -#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) +#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE)) #define TDB_FORMAT1 1 #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 26f30bab9c2f..d42bb2d03041 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -38,7 +38,7 @@ static int handle_ri(struct kvm_vcpu *vcpu) { if (test_kvm_facility(vcpu->kvm, 64)) { VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)"); - vcpu->arch.sie_block->ecb3 |= 0x01; + vcpu->arch.sie_block->ecb3 |= ECB3_RI; kvm_s390_retry_instr(vcpu); return 0; } else diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 
5491be39776b..d91f1df5a854 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -249,7 +249,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; - bool had_tx = scb_s->ecb & 0x10U; + bool had_tx = scb_s->ecb & ECB_TE; unsigned long new_mso = 0; int rc; @@ -307,34 +307,34 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->ihcpu = scb_o->ihcpu; /* MVPG and Protection Exception Interpretation are always available */ - scb_s->eca |= scb_o->eca & 0x01002000U; + scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI); /* Host-protection-interruption introduced with ESOP */ if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP)) - scb_s->ecb |= scb_o->ecb & 0x02U; + scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT; /* transactional execution */ if (test_kvm_facility(vcpu->kvm, 73)) { /* remap the prefix is tx is toggled on */ - if ((scb_o->ecb & 0x10U) && !had_tx) + if ((scb_o->ecb & ECB_TE) && !had_tx) prefix_unmapped(vsie_page); - scb_s->ecb |= scb_o->ecb & 0x10U; + scb_s->ecb |= scb_o->ecb & ECB_TE; } /* SIMD */ if (test_kvm_facility(vcpu->kvm, 129)) { - scb_s->eca |= scb_o->eca & 0x00020000U; - scb_s->ecd |= scb_o->ecd & 0x20000000U; + scb_s->eca |= scb_o->eca & ECA_VX; + scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT; } /* Run-time-Instrumentation */ if (test_kvm_facility(vcpu->kvm, 64)) - scb_s->ecb3 |= scb_o->ecb3 & 0x01U; + scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI; /* Instruction Execution Prevention */ if (test_kvm_facility(vcpu->kvm, 130)) - scb_s->ecb2 |= scb_o->ecb2 & 0x20U; + scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP; if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF)) - scb_s->eca |= scb_o->eca & 0x00000001U; + scb_s->eca |= scb_o->eca & ECA_SII; if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB)) - scb_s->eca |= scb_o->eca & 0x40000000U; + scb_s->eca |= scb_o->eca & ECA_IB; if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI)) - scb_s->eca |= scb_o->eca & 0x80000000U; + scb_s->eca |= scb_o->eca & ECA_CEI; prepare_ibc(vcpu, vsie_page); rc = shadow_crycb(vcpu, vsie_page); @@ -406,7 +406,7 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) prefix += scb_s->mso; rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix); - if (!rc && (scb_s->ecb & 0x10U)) + if (!rc && (scb_s->ecb & ECB_TE)) rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix + PAGE_SIZE); /* @@ -543,7 +543,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) } gpa = scb_o->itdba & ~0xffUL; - if (gpa && (scb_s->ecb & 0x10U)) { + if (gpa && (scb_s->ecb & ECB_TE)) { if (!(gpa & ~0x1fffU)) { rc = set_validity_icpt(scb_s, 0x0080U); goto unpin; @@ -558,8 +558,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) } gpa = scb_o->gvrd & ~0x1ffUL; - if (gpa && (scb_s->eca & 0x00020000U) && - !(scb_s->ecd & 0x20000000U)) { + if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { if (!(gpa & ~0x1fffUL)) { rc = set_validity_icpt(scb_s, 0x1310U); goto unpin; @@ -577,7 +576,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) } gpa = scb_o->riccbd & ~0x3fUL; - if (gpa && (scb_s->ecb3 & 0x01U)) { + if (gpa && (scb_s->ecb3 & ECB3_RI)) { if (!(gpa & ~0x1fffUL)) { rc = set_validity_icpt(scb_s, 0x0043U); goto unpin; -- cgit v1.2.1 From 947b897204c417d22ad90e5f354858d78de15a07 Mon Sep 17 00:00:00 2001 From: Farhan Ali Date: Fri, 17 Mar 
2017 09:59:32 -0400 Subject: KVM: s390: Use defines for intercept code Let's use #define values for better readability. Signed-off-by: Farhan Ali Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 3 +++ arch/s390/kvm/intercept.c | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 10 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 18e9c9d83b23..54e36e7cd201 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -175,7 +175,10 @@ struct kvm_s390_sie_block { #define ICPT_INST 0x04 #define ICPT_PROGI 0x08 #define ICPT_INSTPROGI 0x0C +#define ICPT_EXTREQ 0x10 #define ICPT_EXTINT 0x14 +#define ICPT_IOREQ 0x18 +#define ICPT_WAIT 0x1c #define ICPT_VALIDITY 0x20 #define ICPT_STOP 0x28 #define ICPT_OPEREXC 0x2C diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 0a1764655153..e831f4b3e1c1 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -403,26 +403,26 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; switch (vcpu->arch.sie_block->icptcode) { - case 0x10: - case 0x18: + case ICPT_EXTREQ: + case ICPT_IOREQ: return handle_noop(vcpu); - case 0x04: + case ICPT_INST: rc = handle_instruction(vcpu); break; - case 0x08: + case ICPT_PROGI: return handle_prog(vcpu); - case 0x14: + case ICPT_EXTINT: return handle_external_interrupt(vcpu); - case 0x1c: + case ICPT_WAIT: return kvm_s390_handle_wait(vcpu); - case 0x20: + case ICPT_VALIDITY: return handle_validity(vcpu); - case 0x28: + case ICPT_STOP: return handle_stop(vcpu); - case 0x2c: + case ICPT_OPEREXC: rc = handle_operexc(vcpu); break; - case 0x38: + case ICPT_PARTEXEC: rc = handle_partial_execution(vcpu); break; default: -- cgit v1.2.1 From 4e0b1ab72b8af961bcaca9ec1475279c1cd9579c Mon Sep 17 00:00:00 2001 From: Fan Zhang Date: Tue, 29 Nov 2016 07:17:55 +0100 Subject: KVM: s390: gs support for kvm guests This patch adds guarded storage support for KVM guest. We need to setup the necessary control blocks, the kvm_run structure for the new registers, the necessary wrappers for VSIE, as well as the machine check save areas. GS is enabled lazily and the register saving and reloading is done in KVM code. As this feature adds new content for migration, we provide a new capability for enablement (KVM_CAP_S390_GS). 
Signed-off-by: Fan Zhang Reviewed-by: Christian Borntraeger Reviewed-by: Janosch Frank Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 8 +++++- arch/s390/include/uapi/asm/kvm.h | 14 ++++++++- arch/s390/kvm/intercept.c | 1 + arch/s390/kvm/interrupt.c | 33 +++++++++++++++++++-- arch/s390/kvm/kvm-s390.c | 62 ++++++++++++++++++++++++++++++++++++++++ arch/s390/kvm/kvm-s390.h | 1 + arch/s390/kvm/priv.c | 27 +++++++++++++++++ arch/s390/kvm/vsie.c | 39 +++++++++++++++++++++++++ 8 files changed, 181 insertions(+), 4 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 54e36e7cd201..1af090d93bf5 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -25,6 +25,7 @@ #include #include #include +#include #define KVM_S390_BSCA_CPU_SLOTS 64 #define KVM_S390_ESCA_CPU_SLOTS 248 @@ -192,6 +193,7 @@ struct kvm_s390_sie_block { __u32 ipb; /* 0x0058 */ __u32 scaoh; /* 0x005c */ __u8 reserved60; /* 0x0060 */ +#define ECB_GS 0x40 #define ECB_TE 0x10 #define ECB_SRSI 0x04 #define ECB_HOSTPROTINT 0x02 @@ -237,7 +239,9 @@ struct kvm_s390_sie_block { __u32 crycbd; /* 0x00fc */ __u64 gcr[16]; /* 0x0100 */ __u64 gbea; /* 0x0180 */ - __u8 reserved188[24]; /* 0x0188 */ + __u8 reserved188[8]; /* 0x0188 */ + __u64 sdnxo; /* 0x0190 */ + __u8 reserved198[8]; /* 0x0198 */ __u32 fac; /* 0x01a0 */ __u8 reserved1a4[20]; /* 0x01a4 */ __u64 cbrlo; /* 0x01b8 */ @@ -573,6 +577,7 @@ struct kvm_vcpu_arch { /* if vsie is active, currently executed shadow sie control block */ struct kvm_s390_sie_block *vsie_block; unsigned int host_acrs[NUM_ACRS]; + struct gs_cb *host_gscb; struct fpu host_fpregs; struct kvm_s390_local_interrupt local_int; struct hrtimer ckc_timer; @@ -593,6 +598,7 @@ struct kvm_vcpu_arch { */ seqcount_t cputm_seqcount; __u64 cputm_start; + bool gs_enabled; }; struct kvm_vm_stat { diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index a2ffec4139ad..5bd23cfd9ae5 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -197,6 +197,10 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_VRS (1UL << 6) #define KVM_SYNC_RICCB (1UL << 7) #define KVM_SYNC_FPRS (1UL << 8) +#define KVM_SYNC_GSCB (1UL << 9) +/* length and alignment of the sdnx as a power of two */ +#define SDNXC 8 +#define SDNXL (1UL << SDNXC) /* definition of registers in kvm_run */ struct kvm_sync_regs { __u64 prefix; /* prefix register */ @@ -217,8 +221,16 @@ struct kvm_sync_regs { }; __u8 reserved[512]; /* for future vector expansion */ __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ - __u8 padding[52]; /* riccb needs to be 64byte aligned */ + __u8 padding1[52]; /* riccb needs to be 64byte aligned */ __u8 riccb[64]; /* runtime instrumentation controls block */ + __u8 padding2[192]; /* sdnx needs to be 256byte aligned */ + union { + __u8 sdnx[SDNXL]; /* state description annex */ + struct { + __u64 reserved1[2]; + __u64 gscb[4]; + }; + }; }; #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index e831f4b3e1c1..f5378f336127 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -35,6 +35,7 @@ static const intercept_handler_t instruction_handlers[256] = { [0xb6] = kvm_s390_handle_stctl, [0xb7] = kvm_s390_handle_lctl, [0xb9] = kvm_s390_handle_b9, + [0xe3] = kvm_s390_handle_e3, [0xe5] = kvm_s390_handle_e5, [0xeb] = kvm_s390_handle_eb, }; diff --git 
a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 169558dc7daf..311eef0df855 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -410,6 +410,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu, struct kvm_s390_mchk_info *mchk) { unsigned long ext_sa_addr; + unsigned long lc; freg_t fprs[NUM_FPRS]; union mci mci; int rc; @@ -422,8 +423,28 @@ static int __write_machine_check(struct kvm_vcpu *vcpu, /* Extended save area */ rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr, sizeof(unsigned long)); - /* Only bits 0-53 are used for address formation */ - ext_sa_addr &= ~0x3ffUL; + /* Only bits 0 through 63-LC are used for address formation */ + lc = ext_sa_addr & MCESA_LC_MASK; + if (test_kvm_facility(vcpu->kvm, 133)) { + switch (lc) { + case 0: + case 10: + ext_sa_addr &= ~0x3ffUL; + break; + case 11: + ext_sa_addr &= ~0x7ffUL; + break; + case 12: + ext_sa_addr &= ~0xfffUL; + break; + default: + ext_sa_addr = 0; + break; + } + } else { + ext_sa_addr &= ~0x3ffUL; + } + if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) { if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs, 512)) @@ -431,6 +452,14 @@ static int __write_machine_check(struct kvm_vcpu *vcpu, } else { mci.vr = 0; } + if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133) + && (lc == 11 || lc == 12)) { + if (write_guest_abs(vcpu, ext_sa_addr + 1024, + &vcpu->run->s.regs.gscb, 32)) + mci.gs = 0; + } else { + mci.gs = 0; + } /* General interruption information */ rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 976373c03138..f83f18b77f3d 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -405,6 +405,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_RI: r = test_facility(64); break; + case KVM_CAP_S390_GS: + r = test_facility(133); + break; default: r = 0; } @@ -541,6 +544,20 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s", r ? "(not available)" : "(success)"); break; + case KVM_CAP_S390_GS: + r = -EINVAL; + mutex_lock(&kvm->lock); + if (atomic_read(&kvm->online_vcpus)) { + r = -EBUSY; + } else if (test_facility(133)) { + set_kvm_facility(kvm->arch.model.fac_mask, 133); + set_kvm_facility(kvm->arch.model.fac_list, 133); + r = 0; + } + mutex_unlock(&kvm->lock); + VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s", + r ? "(not available)" : "(success)"); + break; case KVM_CAP_S390_USER_STSI: VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI"); kvm->arch.user_stsi = 1; @@ -1749,6 +1766,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) kvm_s390_set_prefix(vcpu, 0); if (test_kvm_facility(vcpu->kvm, 64)) vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; + if (test_kvm_facility(vcpu->kvm, 133)) + vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; /* fprs can be synchronized via vrs, even if the guest has no vx. With * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format. 
*/ @@ -1993,6 +2012,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->eca |= ECA_VX; vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; } + vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx) + | SDNXC; vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; @@ -2720,8 +2741,10 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct runtime_instr_cb *riccb; + struct gs_cb *gscb; riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; + gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) @@ -2756,6 +2779,19 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)"); vcpu->arch.sie_block->ecb3 |= ECB3_RI; } + /* + * If userspace sets the gscb (e.g. after migration) to non-zero, + * we should enable GS here instead of doing the lazy enablement. + */ + if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && + test_kvm_facility(vcpu->kvm, 133) && + gscb->gssm && + !vcpu->arch.gs_enabled) { + VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)"); + vcpu->arch.sie_block->ecb |= ECB_GS; + vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; + vcpu->arch.gs_enabled = 1; + } save_access_regs(vcpu->arch.host_acrs); restore_access_regs(vcpu->run->s.regs.acrs); /* save host (userspace) fprs/vrs */ @@ -2770,6 +2806,20 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if (test_fp_ctl(current->thread.fpu.fpc)) /* User space provided an invalid FPC, let's clear it */ current->thread.fpu.fpc = 0; + if (MACHINE_HAS_GS) { + preempt_disable(); + __ctl_set_bit(2, 4); + if (current->thread.gs_cb) { + vcpu->arch.host_gscb = current->thread.gs_cb; + save_gs_cb(vcpu->arch.host_gscb); + } + if (vcpu->arch.gs_enabled) { + current->thread.gs_cb = (struct gs_cb *) + &vcpu->run->s.regs.gscb; + restore_gs_cb(current->thread.gs_cb); + } + preempt_enable(); + } kvm_run->kvm_dirty_regs = 0; } @@ -2796,6 +2846,18 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) /* Restore will be done lazily at return */ current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; + if (MACHINE_HAS_GS) { + __ctl_set_bit(2, 4); + if (vcpu->arch.gs_enabled) + save_gs_cb(current->thread.gs_cb); + preempt_disable(); + current->thread.gs_cb = vcpu->arch.host_gscb; + restore_gs_cb(vcpu->arch.host_gscb); + preempt_enable(); + if (!vcpu->arch.host_gscb) + __ctl_clear_bit(2, 4); + vcpu->arch.host_gscb = NULL; + } } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index dfdcde125af6..455124fe0647 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -246,6 +246,7 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu) int is_valid_psw(psw_t *psw); int kvm_s390_handle_aa(struct kvm_vcpu *vcpu); int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); +int kvm_s390_handle_e3(struct kvm_vcpu *vcpu); int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); int kvm_s390_handle_01(struct kvm_vcpu *vcpu); int kvm_s390_handle_b9(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d42bb2d03041..0ffe973535fa 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -53,6 +53,33 @@ int kvm_s390_handle_aa(struct 
kvm_vcpu *vcpu) return -EOPNOTSUPP; } +static int handle_gs(struct kvm_vcpu *vcpu) +{ + if (test_kvm_facility(vcpu->kvm, 133)) { + VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)"); + preempt_disable(); + __ctl_set_bit(2, 4); + current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb; + restore_gs_cb(current->thread.gs_cb); + preempt_enable(); + vcpu->arch.sie_block->ecb |= ECB_GS; + vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; + vcpu->arch.gs_enabled = 1; + kvm_s390_retry_instr(vcpu); + return 0; + } else + return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); +} + +int kvm_s390_handle_e3(struct kvm_vcpu *vcpu) +{ + int code = vcpu->arch.sie_block->ipb & 0xff; + + if (code == 0x49 || code == 0x4d) + return handle_gs(vcpu); + else + return -EOPNOTSUPP; +} /* Handle SCK (SET CLOCK) interception */ static int handle_set_clock(struct kvm_vcpu *vcpu) { diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index d91f1df5a854..2fafc2be777f 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -329,6 +329,11 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) /* Instruction Execution Prevention */ if (test_kvm_facility(vcpu->kvm, 130)) scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP; + /* Guarded Storage */ + if (test_kvm_facility(vcpu->kvm, 133)) { + scb_s->ecb |= scb_o->ecb & ECB_GS; + scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT; + } if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF)) scb_s->eca |= scb_o->eca & ECA_SII; if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB)) @@ -496,6 +501,13 @@ static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) unpin_guest_page(vcpu->kvm, gpa, hpa); scb_s->riccbd = 0; } + + hpa = scb_s->sdnxo; + if (hpa) { + gpa = scb_o->sdnxo; + unpin_guest_page(vcpu->kvm, gpa, hpa); + scb_s->sdnxo = 0; + } } /* @@ -590,6 +602,33 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) goto unpin; scb_s->riccbd = hpa; } + if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { + unsigned long sdnxc; + + gpa = scb_o->sdnxo & ~0xfUL; + sdnxc = scb_o->sdnxo & 0xfUL; + if (!gpa || !(gpa & ~0x1fffUL)) { + rc = set_validity_icpt(scb_s, 0x10b0U); + goto unpin; + } + if (sdnxc < 6 || sdnxc > 12) { + rc = set_validity_icpt(scb_s, 0x10b1U); + goto unpin; + } + if (gpa & ((1 << sdnxc) - 1)) { + rc = set_validity_icpt(scb_s, 0x10b2U); + goto unpin; + } + /* Due to alignment rules (checked above) this cannot + * cross page boundaries + */ + rc = pin_guest_page(vcpu->kvm, gpa, &hpa); + if (rc == -EINVAL) + rc = set_validity_icpt(scb_s, 0x10b0U); + if (rc) + goto unpin; + scb_s->sdnxo = hpa; + } return 0; unpin: unpin_blocks(vcpu, vsie_page); -- cgit v1.2.1 From 08fab50da669e5ee5a542592895fcb63be3cd7b1 Mon Sep 17 00:00:00 2001 From: Fei Li Date: Thu, 19 Jan 2017 17:02:26 +0100 Subject: KVM: s390: interface for suppressible I/O adapters In order to properly implement adapter-interruption suppression, we need a way for userspace to specify which adapters are subject to suppression. Let's convert the existing (and unused) 'pad' field into a 'flags' field and define a flag value for suppressible adapters. Besides, add documentation for the interface. 
Signed-off-by: Fei Li Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 1 + arch/s390/include/uapi/asm/kvm.h | 4 +++- arch/s390/kvm/interrupt.c | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 1af090d93bf5..499c72c2280d 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -621,6 +621,7 @@ struct s390_io_adapter { bool maskable; bool masked; bool swap; + bool suppressible; struct rw_semaphore maps_lock; struct list_head maps; atomic_t nr_maps; diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 5bd23cfd9ae5..5fa144d1df0a 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -41,9 +41,11 @@ struct kvm_s390_io_adapter { __u8 isc; __u8 maskable; __u8 swap; - __u8 pad; + __u8 flags; }; +#define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01 + #define KVM_S390_IO_ADAPTER_MASK 1 #define KVM_S390_IO_ADAPTER_MAP 2 #define KVM_S390_IO_ADAPTER_UNMAP 3 diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 311eef0df855..dba51ad62570 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1997,6 +1997,8 @@ static int register_io_adapter(struct kvm_device *dev, adapter->maskable = adapter_info.maskable; adapter->masked = false; adapter->swap = adapter_info.swap; + adapter->suppressible = (adapter_info.flags) & + KVM_S390_ADAPTER_SUPPRESSIBLE; dev->kvm->arch.adapters[adapter->id] = adapter; return 0; -- cgit v1.2.1 From 519783935451764b397f2a712de5ea778ff77fdf Mon Sep 17 00:00:00 2001 From: Fei Li Date: Fri, 17 Feb 2017 17:06:26 +0800 Subject: KVM: s390: introduce ais mode modify function Provide an interface for userspace to modify AIS (adapter-interruption-suppression) mode state, and add documentation for the interface. Allowed target modes are ALL-Interruptions mode and SINGLE-Interruption mode. We introduce the 'simm' and 'nimm' fields in kvm_s390_float_interrupt to store interruption modes for each ISC. Each bit in 'simm' and 'nimm' targets to one ISC, and collaboratively indicate three modes: ALL-Interruptions, SINGLE-Interruption and NO-Interruptions. This interface can initiate most transitions between the states; transition from SINGLE-Interruption to NO-Interruptions via adapter interrupt injection will be introduced in a following patch. The meaningful combinations are as follows: interruption mode | simm bit | nimm bit ------------------|----------|---------- ALL | 0 | 0 SINGLE | 1 | 0 NO | 1 | 1 Besides, add tracepoint to track AIS mode transitions. 
Co-Authored-By: Yi Min Zhao Signed-off-by: Yi Min Zhao Signed-off-by: Fei Li Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 10 ++++++++++ arch/s390/include/uapi/asm/kvm.h | 6 ++++++ arch/s390/kvm/interrupt.c | 43 ++++++++++++++++++++++++++++++++++++++++ arch/s390/kvm/kvm-s390.c | 4 ++++ arch/s390/kvm/trace-s390.h | 31 +++++++++++++++++++++++++++++ 5 files changed, 94 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 499c72c2280d..552c319483c6 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -521,6 +521,12 @@ struct kvm_s390_local_interrupt { #define FIRQ_CNTR_PFAULT 3 #define FIRQ_MAX_COUNT 4 +/* mask the AIS mode for a given ISC */ +#define AIS_MODE_MASK(isc) (0x80 >> isc) + +#define KVM_S390_AIS_MODE_ALL 0 +#define KVM_S390_AIS_MODE_SINGLE 1 + struct kvm_s390_float_interrupt { unsigned long pending_irqs; spinlock_t lock; @@ -530,6 +536,10 @@ struct kvm_s390_float_interrupt { struct kvm_s390_ext_info srv_signal; int next_rr_cpu; unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)]; + struct mutex ais_lock; + u8 simm; + u8 nimm; + int ais_enabled; }; struct kvm_hw_wp_info_arch { diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 5fa144d1df0a..50d2a927c990 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -26,6 +26,7 @@ #define KVM_DEV_FLIC_ADAPTER_REGISTER 6 #define KVM_DEV_FLIC_ADAPTER_MODIFY 7 #define KVM_DEV_FLIC_CLEAR_IO_IRQ 8 +#define KVM_DEV_FLIC_AISM 9 /* * We can have up to 4*64k pending subchannels + 8 adapter interrupts, * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. @@ -46,6 +47,11 @@ struct kvm_s390_io_adapter { #define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01 +struct kvm_s390_ais_req { + __u8 isc; + __u16 mode; +}; + #define KVM_S390_IO_ADAPTER_MASK 1 #define KVM_S390_IO_ADAPTER_MAP 2 #define KVM_S390_IO_ADAPTER_UNMAP 3 diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index dba51ad62570..96b689e48c08 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2152,6 +2152,45 @@ static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr) return 0; } +static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr) +{ + struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; + struct kvm_s390_ais_req req; + int ret = 0; + + if (!fi->ais_enabled) + return -ENOTSUPP; + + if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req))) + return -EFAULT; + + if (req.isc > MAX_ISC) + return -EINVAL; + + trace_kvm_s390_modify_ais_mode(req.isc, + (fi->simm & AIS_MODE_MASK(req.isc)) ? + (fi->nimm & AIS_MODE_MASK(req.isc)) ? 
+ 2 : KVM_S390_AIS_MODE_SINGLE : + KVM_S390_AIS_MODE_ALL, req.mode); + + mutex_lock(&fi->ais_lock); + switch (req.mode) { + case KVM_S390_AIS_MODE_ALL: + fi->simm &= ~AIS_MODE_MASK(req.isc); + fi->nimm &= ~AIS_MODE_MASK(req.isc); + break; + case KVM_S390_AIS_MODE_SINGLE: + fi->simm |= AIS_MODE_MASK(req.isc); + fi->nimm &= ~AIS_MODE_MASK(req.isc); + break; + default: + ret = -EINVAL; + } + mutex_unlock(&fi->ais_lock); + + return ret; +} + static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { int r = 0; @@ -2188,6 +2227,9 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) case KVM_DEV_FLIC_CLEAR_IO_IRQ: r = clear_io_irq(dev->kvm, attr); break; + case KVM_DEV_FLIC_AISM: + r = modify_ais_mode(dev->kvm, attr); + break; default: r = -EINVAL; } @@ -2207,6 +2249,7 @@ static int flic_has_attr(struct kvm_device *dev, case KVM_DEV_FLIC_ADAPTER_REGISTER: case KVM_DEV_FLIC_ADAPTER_MODIFY: case KVM_DEV_FLIC_CLEAR_IO_IRQ: + case KVM_DEV_FLIC_AISM: return 0; } return -ENXIO; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f83f18b77f3d..977cc1660a83 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1515,6 +1515,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm_s390_crypto_init(kvm); + mutex_init(&kvm->arch.float_int.ais_lock); + kvm->arch.float_int.simm = 0; + kvm->arch.float_int.nimm = 0; + kvm->arch.float_int.ais_enabled = 0; spin_lock_init(&kvm->arch.float_int.lock); for (i = 0; i < FIRQ_LIST_COUNT; i++) INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h index 396485bca191..b32994d1546a 100644 --- a/arch/s390/kvm/trace-s390.h +++ b/arch/s390/kvm/trace-s390.h @@ -280,6 +280,37 @@ TRACE_EVENT(kvm_s390_enable_disable_ibs, __entry->state ? "enabling" : "disabling", __entry->id) ); +/* + * Trace point for modifying ais mode for a given isc. + */ +TRACE_EVENT(kvm_s390_modify_ais_mode, + TP_PROTO(__u8 isc, __u16 from, __u16 to), + TP_ARGS(isc, from, to), + + TP_STRUCT__entry( + __field(__u8, isc) + __field(__u16, from) + __field(__u16, to) + ), + + TP_fast_assign( + __entry->isc = isc; + __entry->from = from; + __entry->to = to; + ), + + TP_printk("for isc %x, modifying interruption mode from %s to %s", + __entry->isc, + (__entry->from == KVM_S390_AIS_MODE_ALL) ? + "ALL-Interruptions Mode" : + (__entry->from == KVM_S390_AIS_MODE_SINGLE) ? + "Single-Interruption Mode" : "No-Interruptions Mode", + (__entry->to == KVM_S390_AIS_MODE_ALL) ? + "ALL-Interruptions Mode" : + (__entry->to == KVM_S390_AIS_MODE_SINGLE) ? + "Single-Interruption Mode" : "No-Interruptions Mode") + ); + #endif /* _TRACE_KVMS390_H */ -- cgit v1.2.1 From a8920950131b1394f9e99ff57a5cf5ceeb0cc25c Mon Sep 17 00:00:00 2001 From: Yi Min Zhao Date: Mon, 20 Feb 2017 10:15:01 +0800 Subject: KVM: s390: introduce adapter interrupt inject function Inject adapter interrupts on a specified adapter which allows to retrieve the adapter flags, e.g. if the adapter is subject to AIS facility or not. And add documentation for this interface. For adapters subject to AIS, handle the airq injection suppression for a given ISC according to the interruption mode: - before injection, if NO-Interruptions Mode, just return 0 and suppress, otherwise, allow the injection. - after injection, if SINGLE-Interruption Mode, change it to NO-Interruptions Mode to suppress the following interrupts. Besides, add tracepoint for suppressed airq and AIS mode transitions. 
Signed-off-by: Yi Min Zhao Signed-off-by: Fei Li Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 1 + arch/s390/kvm/interrupt.c | 53 +++++++++++++++++++++++++++++++++++----- arch/s390/kvm/trace-s390.h | 21 ++++++++++++++++ 3 files changed, 69 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 50d2a927c990..2c9ad251fa33 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -27,6 +27,7 @@ #define KVM_DEV_FLIC_ADAPTER_MODIFY 7 #define KVM_DEV_FLIC_CLEAR_IO_IRQ 8 #define KVM_DEV_FLIC_AISM 9 +#define KVM_DEV_FLIC_AIRQ_INJECT 10 /* * We can have up to 4*64k pending subchannels + 8 adapter interrupts, * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 96b689e48c08..482673e3436d 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2191,6 +2191,48 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr) return ret; } +static int kvm_s390_inject_airq(struct kvm *kvm, + struct s390_io_adapter *adapter) +{ + struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; + struct kvm_s390_interrupt s390int = { + .type = KVM_S390_INT_IO(1, 0, 0, 0), + .parm = 0, + .parm64 = (adapter->isc << 27) | 0x80000000, + }; + int ret = 0; + + if (!fi->ais_enabled || !adapter->suppressible) + return kvm_s390_inject_vm(kvm, &s390int); + + mutex_lock(&fi->ais_lock); + if (fi->nimm & AIS_MODE_MASK(adapter->isc)) { + trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc); + goto out; + } + + ret = kvm_s390_inject_vm(kvm, &s390int); + if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) { + fi->nimm |= AIS_MODE_MASK(adapter->isc); + trace_kvm_s390_modify_ais_mode(adapter->isc, + KVM_S390_AIS_MODE_SINGLE, 2); + } +out: + mutex_unlock(&fi->ais_lock); + return ret; +} + +static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr) +{ + unsigned int id = attr->attr; + struct s390_io_adapter *adapter = get_io_adapter(kvm, id); + + if (!adapter) + return -EINVAL; + + return kvm_s390_inject_airq(kvm, adapter); +} + static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { int r = 0; @@ -2230,6 +2272,9 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) case KVM_DEV_FLIC_AISM: r = modify_ais_mode(dev->kvm, attr); break; + case KVM_DEV_FLIC_AIRQ_INJECT: + r = flic_inject_airq(dev->kvm, attr); + break; default: r = -EINVAL; } @@ -2250,6 +2295,7 @@ static int flic_has_attr(struct kvm_device *dev, case KVM_DEV_FLIC_ADAPTER_MODIFY: case KVM_DEV_FLIC_CLEAR_IO_IRQ: case KVM_DEV_FLIC_AISM: + case KVM_DEV_FLIC_AIRQ_INJECT: return 0; } return -ENXIO; @@ -2360,12 +2406,7 @@ static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e, ret = adapter_indicators_set(kvm, adapter, &e->adapter); up_read(&adapter->maps_lock); if ((ret > 0) && !adapter->masked) { - struct kvm_s390_interrupt s390int = { - .type = KVM_S390_INT_IO(1, 0, 0, 0), - .parm = 0, - .parm64 = (adapter->isc << 27) | 0x80000000, - }; - ret = kvm_s390_inject_vm(kvm, &s390int); + ret = kvm_s390_inject_airq(kvm, adapter); if (ret == 0) ret = 1; } diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h index b32994d1546a..78b7e847984a 100644 --- a/arch/s390/kvm/trace-s390.h +++ b/arch/s390/kvm/trace-s390.h @@ -311,6 +311,27 @@ TRACE_EVENT(kvm_s390_modify_ais_mode, 
"Single-Interruption Mode" : "No-Interruptions Mode") ); +/* + * Trace point for suppressed adapter I/O interrupt. + */ +TRACE_EVENT(kvm_s390_airq_suppressed, + TP_PROTO(__u32 id, __u8 isc), + TP_ARGS(id, isc), + + TP_STRUCT__entry( + __field(__u32, id) + __field(__u8, isc) + ), + + TP_fast_assign( + __entry->id = id; + __entry->isc = isc; + ), + + TP_printk("adapter I/O interrupt suppressed (id:%x isc:%x)", + __entry->id, __entry->isc) + ); + #endif /* _TRACE_KVMS390_H */ -- cgit v1.2.1 From 47a4693e1d3eb09e523c223753fb5a97721f49b8 Mon Sep 17 00:00:00 2001 From: Yi Min Zhao Date: Fri, 10 Mar 2017 09:29:38 +0100 Subject: KVM: s390: introduce AIS capability Introduce a cap to enable AIS facility bit, and add documentation for this capability. Signed-off-by: Yi Min Zhao Signed-off-by: Fei Li Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 977cc1660a83..11b7d6638991 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -380,6 +380,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_SKEYS: case KVM_CAP_S390_IRQ_STATE: case KVM_CAP_S390_USER_INSTR0: + case KVM_CAP_S390_AIS: r = 1; break; case KVM_CAP_S390_MEM_OP: @@ -544,6 +545,20 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s", r ? "(not available)" : "(success)"); break; + case KVM_CAP_S390_AIS: + mutex_lock(&kvm->lock); + if (kvm->created_vcpus) { + r = -EBUSY; + } else { + set_kvm_facility(kvm->arch.model.fac_mask, 72); + set_kvm_facility(kvm->arch.model.fac_list, 72); + kvm->arch.float_int.ais_enabled = 1; + r = 0; + } + mutex_unlock(&kvm->lock); + VM_EVENT(kvm, 3, "ENABLE: AIS %s", + r ? "(not available)" : "(success)"); + break; case KVM_CAP_S390_GS: r = -EINVAL; mutex_lock(&kvm->lock); -- cgit v1.2.1 From fe722d13e3638470a7eef5dad6c9ded7f6653773 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 7 Apr 2017 14:23:13 +0200 Subject: KVM: s390: Fix sdnxo setting for nested guests If the guest does not use the host register management, but it uses the sdnx area, we must fill in a proper sdnxo value (address of sdnx and the sdnxc). Reported-by: David Hildenbrand Signed-off-by: Christian Borntraeger Acked-by: Cornelia Huck --- arch/s390/kvm/vsie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 2fafc2be777f..025b1f2e17a9 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -627,7 +627,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) rc = set_validity_icpt(scb_s, 0x10b0U); if (rc) goto unpin; - scb_s->sdnxo = hpa; + scb_s->sdnxo = hpa | sdnxc; } return 0; unpin: -- cgit v1.2.1 From 80248559992a05573b2db7865490eb82c745db87 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Wed, 12 Apr 2017 12:59:59 +0200 Subject: KVM: s390: fix stale machine check data for guarded storage When delivering a machine check the CPU state is "loaded", which means that some registers are already in the host registers. Before writing the register content into the machine check save area, we must make sure that we save the content of the registers into the data structures that are used for delivering the machine check. 
We already do the right thing for access, vector/floating point registers, let's do the same for guarded storage. Fixes: 4e0b1ab72b8a ("KVM: s390: gs support for kvm guests") Signed-off-by: Christian Borntraeger Reviewed-by: Cornelia Huck --- arch/s390/kvm/interrupt.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 482673e3436d..caf15c8a8948 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -419,6 +419,8 @@ static int __write_machine_check(struct kvm_vcpu *vcpu, /* take care of lazy register loading */ save_fpu_regs(); save_access_regs(vcpu->run->s.regs.acrs); + if (MACHINE_HAS_GS && vcpu->arch.gs_enabled) + save_gs_cb(current->thread.gs_cb); /* Extended save area */ rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr, -- cgit v1.2.1 From 71cb1bf66ef0d1d36ff3f04150450df9dd8af654 Mon Sep 17 00:00:00 2001 From: Farhan Ali Date: Thu, 23 Feb 2017 13:39:17 -0500 Subject: s390/sclp: Detect KSS facility Let's detect the keyless subset facility. Signed-off-by: Farhan Ali Reviewed-by: Cornelia Huck Reviewed-by: Christian Borntraeger Acked-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/sclp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index ace3bd315438..6f5167bc1928 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -75,6 +75,7 @@ struct sclp_info { unsigned char has_pfmfi : 1; unsigned char has_ibs : 1; unsigned char has_skey : 1; + unsigned char has_kss : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; -- cgit v1.2.1 From 730cd632c4e485b90f97fe998e300045da094938 Mon Sep 17 00:00:00 2001 From: Farhan Ali Date: Fri, 24 Feb 2017 16:12:56 -0500 Subject: KVM: s390: Support keyless subset guest mode If the KSS facility is available on the machine, we also make it available for our KVM guests. The KSS facility bypasses storage key management as long as the guest does not issue a related instruction. When that happens, the control is returned to the host, which has to turn off KSS for a guest vcpu before retrying the instruction. Signed-off-by: Corey S. 
McQuay Signed-off-by: Farhan Ali Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 2 ++ arch/s390/include/uapi/asm/kvm.h | 1 + arch/s390/kvm/intercept.c | 3 +++ arch/s390/kvm/kvm-s390.c | 8 +++++++- arch/s390/kvm/kvm-s390.h | 1 + arch/s390/kvm/priv.c | 19 +++++++++++++------ arch/s390/kvm/vsie.c | 6 +++++- 7 files changed, 32 insertions(+), 8 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 552c319483c6..426614a882a9 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -122,6 +122,7 @@ struct esca_block { #define CPUSTAT_SLSR 0x00002000 #define CPUSTAT_ZARCH 0x00000800 #define CPUSTAT_MCDS 0x00000100 +#define CPUSTAT_KSS 0x00000200 #define CPUSTAT_SM 0x00000080 #define CPUSTAT_IBS 0x00000040 #define CPUSTAT_GED2 0x00000010 @@ -185,6 +186,7 @@ struct kvm_s390_sie_block { #define ICPT_OPEREXC 0x2C #define ICPT_PARTEXEC 0x38 #define ICPT_IOINST 0x40 +#define ICPT_KSS 0x5c __u8 icptcode; /* 0x0050 */ __u8 icptstatus; /* 0x0051 */ __u16 ihcpu; /* 0x0052 */ diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 2c9ad251fa33..bf9267930939 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -119,6 +119,7 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_FEAT_CMMA 10 #define KVM_S390_VM_CPU_FEAT_PFMFI 11 #define KVM_S390_VM_CPU_FEAT_SIGPIF 12 +#define KVM_S390_VM_CPU_FEAT_KSS 13 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index f5378f336127..a4752bf6b526 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -426,6 +426,9 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) case ICPT_PARTEXEC: rc = handle_partial_execution(vcpu); break; + case ICPT_KSS: + rc = kvm_s390_skey_check_enable(vcpu); + break; default: return -EOPNOTSUPP; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 11b7d6638991..8771fef112a1 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -300,6 +300,8 @@ static void kvm_s390_cpu_feat_init(void) allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI); if (sclp.has_ibs) allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS); + if (sclp.has_kss) + allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS); /* * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make * all skey handling functions read/set the skey from the PGSTE @@ -2034,7 +2036,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx) | SDNXC; vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; - vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; + + if (sclp.has_kss) + atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags); + else + vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; if (vcpu->kvm->arch.use_cmma) { rc = kvm_s390_vcpu_setup_cmma(vcpu); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 455124fe0647..55f5c8457d6d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -254,6 +254,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu); int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu); int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu); int kvm_s390_handle_eb(struct kvm_vcpu *vcpu); +int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu); /* implemented in vsie.c */ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c 
b/arch/s390/kvm/priv.c index 0ffe973535fa..c03106c428cf 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -198,18 +198,25 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) return 0; } -static int __skey_check_enable(struct kvm_vcpu *vcpu) +int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu) { int rc = 0; + struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; trace_kvm_s390_skey_related_inst(vcpu); - if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))) + if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) && + !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)) return rc; rc = s390_enable_skey(); VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc); - if (!rc) - vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); + if (!rc) { + if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS) + atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags); + else + sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | + ICTL_RRBE); + } return rc; } @@ -218,7 +225,7 @@ static int try_handle_skey(struct kvm_vcpu *vcpu) int rc; vcpu->stat.instruction_storage_key++; - rc = __skey_check_enable(vcpu); + rc = kvm_s390_skey_check_enable(vcpu); if (rc) return rc; if (sclp.has_skey) { @@ -916,7 +923,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) } if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { - int rc = __skey_check_enable(vcpu); + int rc = kvm_s390_skey_check_enable(vcpu); if (rc) return rc; diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 025b1f2e17a9..4719ecb9ab42 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -117,6 +117,8 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) newflags |= cpuflags & CPUSTAT_SM; if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS)) newflags |= cpuflags & CPUSTAT_IBS; + if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS)) + newflags |= cpuflags & CPUSTAT_KSS; atomic_set(&scb_s->cpuflags, newflags); return 0; @@ -289,7 +291,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) * bits. Therefore we cannot provide interpretation and would later * have to provide own emulation handlers. */ - scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; + if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS)) + scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; + scb_s->icpua = scb_o->icpua; if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM)) -- cgit v1.2.1 From e000b8e0968dd7bfa09c444607ce1e48e57aafd3 Mon Sep 17 00:00:00 2001 From: "Jason J. Herne" Date: Mon, 20 Mar 2017 09:57:42 -0400 Subject: s390: kvm: Cpu model support for msa6, msa7 and msa8 msa6 and msa7 require no changes. msa8 adds kma instruction and feature area. Signed-off-by: Jason J. 
Herne Reviewed-by: Christian Borntraeger Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 3 ++- arch/s390/kvm/kvm-s390.c | 4 ++++ arch/s390/tools/gen_facilities.c | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index bf9267930939..3dd2a1d308dd 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -141,7 +141,8 @@ struct kvm_s390_vm_cpu_subfunc { __u8 kmo[16]; /* with MSA4 */ __u8 pcc[16]; /* with MSA4 */ __u8 ppno[16]; /* with MSA5 */ - __u8 reserved[1824]; + __u8 kma[16]; /* with MSA8 */ + __u8 reserved[1808]; }; /* kvm attributes for crypto */ diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8771fef112a1..7eb1275cc265 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -276,6 +276,10 @@ static void kvm_s390_cpu_feat_init(void) __cpacf_query(CPACF_PPNO, (cpacf_mask_t *) kvm_s390_available_subfunc.ppno); + if (test_facility(146)) /* MSA8 */ + __cpacf_query(CPACF_KMA, (cpacf_mask_t *) + kvm_s390_available_subfunc.kma); + if (MACHINE_HAS_ESOP) allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP); /* diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c index 0cf802de52a1..be63fbd699fd 100644 --- a/arch/s390/tools/gen_facilities.c +++ b/arch/s390/tools/gen_facilities.c @@ -82,6 +82,7 @@ static struct facility_def facility_defs[] = { 78, /* enhanced-DAT 2 */ 130, /* instruction-execution-protection */ 131, /* enhanced-SOP 2 and side-effect */ + 146, /* msa extension 8 */ -1 /* END */ } }, -- cgit v1.2.1 From 72875d8a4d92f6f37e051be522b2252fd49bd50e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Wed, 26 Apr 2017 22:32:19 +0200 Subject: KVM: add kvm_{test,clear}_request to replace {test,clear}_bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Users were expected to use kvm_check_request() for testing and clearing, but request have expanded their use since then and some users want to only test or do a faster clear. Make sure that requests are not directly accessed with bit operations. Reviewed-by: Christian Borntraeger Signed-off-by: Radim Krčmář Reviewed-by: Andrew Jones Reviewed-by: Cornelia Huck Signed-off-by: Paolo Bonzini --- arch/s390/kvm/kvm-s390.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 7eb1275cc265..4bafb0a0c8b5 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2496,7 +2496,7 @@ retry: } /* nothing to do, just clear the request */ - clear_bit(KVM_REQ_UNHALT, &vcpu->requests); + kvm_clear_request(KVM_REQ_UNHALT, vcpu); return 0; } -- cgit v1.2.1
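
For reference, below is a minimal userspace sketch (not part of the patches above) of how the AIS interfaces introduced in this series might be exercised: enabling KVM_CAP_S390_AIS on the VM, switching one ISC into SINGLE-Interruption mode via KVM_DEV_FLIC_AISM, and injecting an adapter interrupt via KVM_DEV_FLIC_AIRQ_INJECT. The names vm_fd and adapter_id, and the local AIS_MODE_* constants (mirroring the kernel-internal KVM_S390_AIS_MODE_* values shown in the patches), are assumptions for illustration; the adapter is assumed to have been registered earlier with KVM_DEV_FLIC_ADAPTER_REGISTER and the KVM_S390_ADAPTER_SUPPRESSIBLE flag.

/*
 * Illustrative sketch only -- not taken from the series above.
 * Assumes vm_fd is an open KVM VM fd and adapter_id names a suppressible
 * adapter registered earlier via KVM_DEV_FLIC_ADAPTER_REGISTER.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

/* mirror the kernel-internal KVM_S390_AIS_MODE_* values */
#define AIS_MODE_ALL	0
#define AIS_MODE_SINGLE	1

static int enable_ais_and_inject(int vm_fd, unsigned int adapter_id,
				 unsigned char isc)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_AIS };
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
	struct kvm_s390_ais_req req = { .isc = isc, .mode = AIS_MODE_SINGLE };
	struct kvm_device_attr attr;
	int flic_fd;

	/* AIS must be enabled before any vcpu has been created */
	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
		return -1;

	/* create the floating interrupt controller device (one per VM) */
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;
	flic_fd = cd.fd;

	/* put this ISC into SINGLE-Interruption mode (simm=1, nimm=0) */
	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_DEV_FLIC_AISM;
	attr.addr = (unsigned long)&req;
	if (ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
		return -1;

	/*
	 * Inject one adapter interrupt; per the series above, the kernel then
	 * flips this ISC to NO-Interruptions mode, so further injections for
	 * it are suppressed until the mode is changed again via AISM.
	 */
	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_DEV_FLIC_AIRQ_INJECT;
	attr.attr = adapter_id;
	return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
}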