Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/boot/Makefile                   |   7
-rw-r--r--  arch/s390/boot/compressed/Makefile        |   4
-rw-r--r--  arch/s390/configs/debug_defconfig         |   2
-rw-r--r--  arch/s390/configs/performance_defconfig   |   2
-rw-r--r--  arch/s390/include/asm/Kbuild              |   1
-rw-r--r--  arch/s390/include/asm/ap.h                |  11
-rw-r--r--  arch/s390/include/asm/cio.h               |   1
-rw-r--r--  arch/s390/include/asm/elf.h               |  11
-rw-r--r--  arch/s390/include/asm/irq.h               |   1
-rw-r--r--  arch/s390/include/asm/isc.h               |   1
-rw-r--r--  arch/s390/include/asm/kvm_host.h          |  39
-rw-r--r--  arch/s390/include/asm/lowcore.h           |  61
-rw-r--r--  arch/s390/include/uapi/asm/Kbuild         |   2
-rw-r--r--  arch/s390/kernel/crash_dump.c             |   3
-rw-r--r--  arch/s390/kernel/irq.c                    |   1
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_diag.c      |  19
-rw-r--r--  arch/s390/kernel/setup.c                  |  16
-rw-r--r--  arch/s390/kernel/smp.c                    |  12
-rw-r--r--  arch/s390/kernel/topology.c               |   6
-rw-r--r--  arch/s390/kernel/vtime.c                  |  19
-rw-r--r--  arch/s390/kvm/interrupt.c                 | 431
-rw-r--r--  arch/s390/kvm/kvm-s390.c                  | 190
-rw-r--r--  arch/s390/kvm/kvm-s390.h                  |   4
-rw-r--r--  arch/s390/numa/mode_emu.c                 |   3
-rw-r--r--  arch/s390/numa/numa.c                     |   6
-rw-r--r--  arch/s390/scripts/Makefile.chkbss         |  25
26 files changed, 739 insertions(+), 139 deletions(-)
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index d5ad724f5c96..c844eaf24ed7 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -57,9 +57,6 @@ $(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE
$(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
-quiet_cmd_ar = AR $@
- cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(filter $(OBJECTS), $^)
-
$(obj)/startup.a: $(OBJECTS) FORCE
$(call if_changed,ar)
@@ -67,6 +64,6 @@ install: $(CONFIGURE) $(obj)/bzImage
sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
System.map "$(INSTALL_PATH)"
-chkbss := $(OBJECTS)
-chkbss-target := $(obj)/startup.a
+chkbss := $(obj-y)
+chkbss-target := startup.a
include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index b1bdd15e3429..fa529c5b4486 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -63,6 +63,6 @@ OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section
$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
$(call if_changed,objcopy)
-chkbss := $(filter-out $(obj)/piggy.o $(obj)/info.o,$(OBJECTS))
-chkbss-target := $(obj)/vmlinux.bin
+chkbss := $(filter-out piggy.o info.o, $(obj-y))
+chkbss-target := vmlinux.bin
include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index c69cb04b7a59..9824c7bad9d4 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -500,7 +500,6 @@ CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -520,6 +519,7 @@ CONFIG_BTRFS_DEBUG=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_FS_ENCRYPTION=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 32f539dc9c19..4fcbe5792744 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -497,7 +497,6 @@ CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -515,6 +514,7 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_FS_ENCRYPTION=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index e3239772887a..12d77cb11fe5 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -20,7 +20,6 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += preempt.h
generic-y += rwsem.h
generic-y += trace_clock.h
generic-y += unaligned.h
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index 1a6a7092d942..e94a0a28b5eb 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
return reg1;
}
+/*
+ * Interface to tell the AP bus code that a configuration
+ * change has happened. The bus code should at least do
+ * an ap bus resource rescan.
+ */
+#if IS_ENABLED(CONFIG_ZCRYPT)
+void ap_bus_cfg_chg(void);
+#else
+static inline void ap_bus_cfg_chg(void) {}
+#endif
+
#endif /* _ASM_S390_AP_H_ */
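A minimal sketch of how a caller might use the new hook (the handler name below is hypothetical; only ap_bus_cfg_chg() itself comes from this patch). The IS_ENABLED(CONFIG_ZCRYPT) stub keeps such callers link-clean when the AP bus code is not built:

    /* hypothetical notifier for an AP configuration change: */
    static void handle_ap_cfg_change(void)
    {
            /* ... decode the configuration-change event ... */
            ap_bus_cfg_chg();  /* let the AP bus rescan its resources */
    }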
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 225667652069..1727180e8ca1 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -331,5 +331,6 @@ extern void css_schedule_reprobe(void);
/* Function from drivers/s390/cio/chsc.c */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
int chsc_sstpi(void *page, void *result, size_t size);
+int chsc_sgib(u32 origin);
#endif
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 7d22a474a040..f74639a05f0f 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -252,11 +252,14 @@ do { \
/*
* Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break use 32MB for
+ * 64-bit and 8MB for 31-bit.
*/
-#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL)
#define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL)
#define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL)
#define STACK_RND_MASK MMAP_RND_MASK
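A quick sanity check of the new mask value, assuming the usual 4 KiB base page size (the masks are applied to page counts):

    /* BRK_RND_MASK operates on pages, so with 4 KiB pages:
     *   64-bit: (0x1fff + 1) pages * 4 KiB = 32 MiB of brk randomization
     *   31-bit: (0x7ff  + 1) pages * 4 KiB =  8 MiB
     * which matches the updated comment; the previous 64-bit value
     * 0x3ffff would have allowed up to 1 GiB. */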
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 2f7f27e5493f..afaf5e3c57fd 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -62,6 +62,7 @@ enum interruption_class {
IRQIO_MSI,
IRQIO_VIR,
IRQIO_VAI,
+ IRQIO_GAL,
NMI_NMI,
CPU_RST,
NR_ARCH_IRQS
diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h
index 6cb9e2ed05b6..b2cc1ec78d06 100644
--- a/arch/s390/include/asm/isc.h
+++ b/arch/s390/include/asm/isc.h
@@ -21,6 +21,7 @@
/* Adapter interrupts. */
#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
#define PCI_ISC 2 /* PCI I/O subchannels */
+#define GAL_ISC 5 /* GIB alert */
#define AP_ISC 6 /* adjunct processor (crypto) devices */
/* Functions for registration of I/O interruption subclasses */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d5d24889c3bc..c47e22bba87f 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -591,7 +591,6 @@ struct kvm_s390_float_interrupt {
struct kvm_s390_mchk_info mchk;
struct kvm_s390_ext_info srv_signal;
int next_rr_cpu;
- unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
struct mutex ais_lock;
u8 simm;
u8 nimm;
@@ -712,6 +711,7 @@ struct s390_io_adapter {
struct kvm_s390_cpu_model {
/* facility mask supported by kvm & hosting machine */
__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
+ struct kvm_s390_vm_cpu_subfunc subfuncs;
/* facility list requested by guest (in dma page) */
__u64 *fac_list;
u64 cpuid;
@@ -782,9 +782,21 @@ struct kvm_s390_gisa {
u8 reserved03[11];
u32 airq_count;
} g1;
+ struct {
+ u64 word[4];
+ } u64;
};
};
+struct kvm_s390_gib {
+ u32 alert_list_origin;
+ u32 reserved01;
+ u8:5;
+ u8 nisc:3;
+ u8 reserved03[3];
+ u32 reserved04[5];
+};
+
/*
* sie_page2 has to be allocated as DMA because fac_list, crycb and
* gisa need 31bit addresses in the sie control block.
@@ -793,7 +805,8 @@ struct sie_page2 {
__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64]; /* 0x0000 */
struct kvm_s390_crypto_cb crycb; /* 0x0800 */
struct kvm_s390_gisa gisa; /* 0x0900 */
- u8 reserved920[0x1000 - 0x920]; /* 0x0920 */
+ struct kvm *kvm; /* 0x0920 */
+ u8 reserved928[0x1000 - 0x928]; /* 0x0928 */
};
struct kvm_s390_vsie {
@@ -804,6 +817,20 @@ struct kvm_s390_vsie {
struct page *pages[KVM_MAX_VCPUS];
};
+struct kvm_s390_gisa_iam {
+ u8 mask;
+ spinlock_t ref_lock;
+ u32 ref_count[MAX_ISC + 1];
+};
+
+struct kvm_s390_gisa_interrupt {
+ struct kvm_s390_gisa *origin;
+ struct kvm_s390_gisa_iam alert;
+ struct hrtimer timer;
+ u64 expires;
+ DECLARE_BITMAP(kicked_mask, KVM_MAX_VCPUS);
+};
+
struct kvm_arch{
void *sca;
int use_esca;
@@ -837,7 +864,8 @@ struct kvm_arch{
atomic64_t cmma_dirty_pages;
/* subset of available cpu features enabled by user space */
DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
- struct kvm_s390_gisa *gisa;
+ DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
+ struct kvm_s390_gisa_interrupt gisa_int;
};
#define KVM_HVA_ERR_BAD (-1UL)
@@ -871,6 +899,9 @@ void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;
+extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
+extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
+
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_check_processor_compat(void *rtn) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
@@ -878,7 +909,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index cc0947e08b6f..5b9f10b1e55d 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -91,52 +91,53 @@ struct lowcore {
__u64 hardirq_timer; /* 0x02e8 */
__u64 softirq_timer; /* 0x02f0 */
__u64 steal_timer; /* 0x02f8 */
- __u64 last_update_timer; /* 0x0300 */
- __u64 last_update_clock; /* 0x0308 */
- __u64 int_clock; /* 0x0310 */
- __u64 mcck_clock; /* 0x0318 */
- __u64 clock_comparator; /* 0x0320 */
- __u64 boot_clock[2]; /* 0x0328 */
+ __u64 avg_steal_timer; /* 0x0300 */
+ __u64 last_update_timer; /* 0x0308 */
+ __u64 last_update_clock; /* 0x0310 */
+ __u64 int_clock; /* 0x0318 */
+ __u64 mcck_clock; /* 0x0320 */
+ __u64 clock_comparator; /* 0x0328 */
+ __u64 boot_clock[2]; /* 0x0330 */
/* Current process. */
- __u64 current_task; /* 0x0338 */
- __u64 kernel_stack; /* 0x0340 */
+ __u64 current_task; /* 0x0340 */
+ __u64 kernel_stack; /* 0x0348 */
/* Interrupt, DAT-off and restartstack. */
- __u64 async_stack; /* 0x0348 */
- __u64 nodat_stack; /* 0x0350 */
- __u64 restart_stack; /* 0x0358 */
+ __u64 async_stack; /* 0x0350 */
+ __u64 nodat_stack; /* 0x0358 */
+ __u64 restart_stack; /* 0x0360 */
/* Restart function and parameter. */
- __u64 restart_fn; /* 0x0360 */
- __u64 restart_data; /* 0x0368 */
- __u64 restart_source; /* 0x0370 */
+ __u64 restart_fn; /* 0x0368 */
+ __u64 restart_data; /* 0x0370 */
+ __u64 restart_source; /* 0x0378 */
/* Address space pointer. */
- __u64 kernel_asce; /* 0x0378 */
- __u64 user_asce; /* 0x0380 */
- __u64 vdso_asce; /* 0x0388 */
+ __u64 kernel_asce; /* 0x0380 */
+ __u64 user_asce; /* 0x0388 */
+ __u64 vdso_asce; /* 0x0390 */
/*
* The lpp and current_pid fields form a
* 64-bit value that is set as program
* parameter with the LPP instruction.
*/
- __u32 lpp; /* 0x0390 */
- __u32 current_pid; /* 0x0394 */
+ __u32 lpp; /* 0x0398 */
+ __u32 current_pid; /* 0x039c */
/* SMP info area */
- __u32 cpu_nr; /* 0x0398 */
- __u32 softirq_pending; /* 0x039c */
- __u32 preempt_count; /* 0x03a0 */
- __u32 spinlock_lockval; /* 0x03a4 */
- __u32 spinlock_index; /* 0x03a8 */
- __u32 fpu_flags; /* 0x03ac */
- __u64 percpu_offset; /* 0x03b0 */
- __u64 vdso_per_cpu_data; /* 0x03b8 */
- __u64 machine_flags; /* 0x03c0 */
- __u64 gmap; /* 0x03c8 */
- __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */
+ __u32 cpu_nr; /* 0x03a0 */
+ __u32 softirq_pending; /* 0x03a4 */
+ __u32 preempt_count; /* 0x03a8 */
+ __u32 spinlock_lockval; /* 0x03ac */
+ __u32 spinlock_index; /* 0x03b0 */
+ __u32 fpu_flags; /* 0x03b4 */
+ __u64 percpu_offset; /* 0x03b8 */
+ __u64 vdso_per_cpu_data; /* 0x03c0 */
+ __u64 machine_flags; /* 0x03c8 */
+ __u64 gmap; /* 0x03d0 */
+ __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */
/* br %r1 trampoline */
__u16 br_r1_trampoline; /* 0x0400 */
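The offset shift is self-consistent; plain arithmetic on the trailing pad shows the inserted field is fully absorbed before 0x0400:

    0x0400 - 0x03d0 = 0x30 = 48 bytes (old pad_0x03d0)
    0x0400 - 0x03d8 = 0x28 = 40 bytes (new pad_0x03d8)
    48 - 40 = 8 bytes = sizeof(__u64 avg_steal_timer)

so br_r1_trampoline keeps its offset 0x0400.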
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 6b0f30b14642..46c1ff0b842a 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generated-y += unistd_64.h
-generic-y += socket.h
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 97eae3871868..f96a5857bbfd 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -61,6 +61,9 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
struct save_area *sa;
sa = (void *) memblock_phys_alloc(sizeof(*sa), 8);
+ if (!sa)
+ panic("Failed to allocate save area\n");
+
if (is_boot_cpu)
list_add(&sa->list, &dump_save_areas);
else
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 0e8d68bac82c..0cd5a5f96729 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -88,6 +88,7 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQIO_MSI, .name = "MSI", .desc = "[I/O] MSI Interrupt" },
{.irq = IRQIO_VIR, .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
{.irq = IRQIO_VAI, .name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
+ {.irq = IRQIO_GAL, .name = "GAL", .desc = "[I/O] GIB Alert"},
{.irq = NMI_NMI, .name = "NMI", .desc = "[NMI] Machine Check"},
{.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
};
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index c6fad208c2fa..b6854812d2ed 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
*/
static int __hw_perf_event_init(struct perf_event *event)
{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct perf_event_attr *attr = &event->attr;
+ struct cpu_cf_events *cpuhw;
enum cpumf_ctr_set i;
int err = 0;
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s event %p cpu %d authorized %#x\n", __func__,
- event, event->cpu, cpuhw->info.auth_ctl);
+ debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
+ event, event->cpu);
event->hw.config = attr->config;
event->hw.config_base = 0;
- local64_set(&event->count, 0);
- /* Add all authorized counter sets to config_base */
+ /* Add all authorized counter sets to config_base. The
+ * hardware init function is either called per-cpu or just once
+ * for all CPUs (event->cpu == -1). This depends on whether
+ * counting is started for all CPUs or on a per workload basis where
+ * the perf event moves from one CPU to another CPU.
+ * Checking the authorization on any CPU is fine as the hardware
+ * applies the same authorization settings to all CPUs.
+ */
+ cpuhw = &get_cpu_var(cpu_cf_events);
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
event->hw.config_base |= cpumf_ctr_ctl[i];
+ put_cpu_var(cpu_cf_events);
/* No authorized counter sets, nothing to count/sample */
if (!event->hw.config_base) {
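The move from this_cpu_ptr() to the get_cpu_var()/put_cpu_var() pair is the standard preemption-safe per-cpu read; a generic sketch of the idiom (not code from this patch, and the local 'auth' is assumed):

    unsigned int auth;
    struct cpu_cf_events *cpuhw;

    cpuhw = &get_cpu_var(cpu_cf_events);      /* disables preemption */
    auth = cpuhw->info.auth_ctl;              /* read this CPU's copy */
    put_cpu_var(cpu_cf_events);               /* re-enables preemption */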
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 12934e8fbb91..2c642af526ce 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -378,6 +378,10 @@ static void __init setup_lowcore_dat_off(void)
*/
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
+ if (!lc)
+ panic("%s: Failed to allocate %zu bytes align=%zx\n",
+ __func__, sizeof(*lc), sizeof(*lc));
+
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
@@ -419,6 +423,9 @@ static void __init setup_lowcore_dat_off(void)
* all CPUs in case *one* of them does a PSW restart.
*/
restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+ if (!restart_stack)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, THREAD_SIZE, THREAD_SIZE);
restart_stack += STACK_INIT_OFFSET;
/*
@@ -495,6 +502,9 @@ static void __init setup_resources(void)
for_each_memblock(memory, reg) {
res = memblock_alloc(sizeof(*res), 8);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
@@ -509,6 +519,9 @@ static void __init setup_resources(void)
continue;
if (std_res->end > res->end) {
sub_res = memblock_alloc(sizeof(*sub_res), 8);
+ if (!sub_res)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
@@ -966,6 +979,9 @@ static void __init setup_randomness(void)
vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
PAGE_SIZE);
+ if (!vmms)
+ panic("Failed to allocate memory for sysinfo structure\n");
+
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
memblock_free((unsigned long) vmms, PAGE_SIZE);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b198ece2aad6..bd197baf1dc3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->machine_flags = S390_lowcore.machine_flags;
- lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+ lc->user_timer = lc->system_timer =
+ lc->steal_timer = lc->avg_steal_timer = 0;
__ctl_store(lc->cregs_save_area, 0, 15);
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
@@ -656,7 +657,11 @@ void __init smp_save_dump_cpus(void)
/* No previous system present, normal boot. */
return;
/* Allocate a page as dumping area for the store status sigps */
- page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
+ page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
+ if (!page)
+ panic("ERROR: Failed to allocate %lx bytes below %lx\n",
+ PAGE_SIZE, 1UL << 31);
+
/* Set multi-threading state to the previous system. */
pcpu_set_smt(sclp.mtid_prev);
boot_cpu_addr = stap();
@@ -766,6 +771,9 @@ void __init smp_detect_cpus(void)
/* Get CPU information */
info = memblock_alloc(sizeof(*info), 8);
+ if (!info)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*info), 8);
smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 8992b04c0ade..8964a3f60aad 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -520,6 +520,9 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
mask->next = memblock_alloc(sizeof(*mask->next), 8);
+ if (!mask->next)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*mask->next), 8);
mask = mask->next;
}
}
@@ -538,6 +541,9 @@ void __init topology_init_early(void)
if (!MACHINE_HAS_TOPOLOGY)
goto out;
tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!tl_info)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 98f850e00008..a69a0911ed0e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
*/
static int do_account_vtime(struct task_struct *tsk)
{
- u64 timer, clock, user, guest, system, hardirq, softirq, steal;
+ u64 timer, clock, user, guest, system, hardirq, softirq;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
if (softirq)
account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
- steal = S390_lowcore.steal_timer;
- if ((s64) steal > 0) {
- S390_lowcore.steal_timer = 0;
- account_steal_time(cputime_to_nsecs(steal));
- }
-
return virt_timer_forward(user + guest + system + hardirq + softirq);
}
@@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
*/
void vtime_flush(struct task_struct *tsk)
{
+ u64 steal, avg_steal;
+
if (do_account_vtime(tsk))
virt_timer_expire();
+
+ steal = S390_lowcore.steal_timer;
+ avg_steal = S390_lowcore.avg_steal_timer / 2;
+ if ((s64) steal > 0) {
+ S390_lowcore.steal_timer = 0;
+ account_steal_time(steal);
+ avg_steal += steal;
+ }
+ S390_lowcore.avg_steal_timer = avg_steal;
}
/*
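The new field implements an exponentially decaying average of the steal samples; a standalone model of the update performed in vtime_flush() (plain C, kernel-style types and names assumed):

    static u64 avg;                    /* models S390_lowcore.avg_steal_timer */

    static void model_flush(u64 steal) /* one vtime_flush() invocation */
    {
            u64 a = avg / 2;           /* halve the previous average */
            if ((s64) steal > 0)
                    a += steal;        /* fold in the new sample */
            avg = a;                   /* avg_n = s_n + s_n-1/2 + s_n-2/4 + ... */
    }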
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fcb55b02990e..82162867f378 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -7,6 +7,9 @@
* Author(s): Carsten Otte <cotte@de.ibm.com>
*/
+#define KMSG_COMPONENT "kvm-s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
@@ -23,6 +26,7 @@
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
+#include <asm/airq.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
@@ -31,6 +35,8 @@
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
+static struct kvm_s390_gib *gib;
+
/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
@@ -217,22 +223,100 @@ static inline u8 int_word_to_isc(u32 int_word)
*/
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
-static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+/**
+ * gisa_set_iam - change the GISA interruption alert mask
+ *
+ * @gisa: gisa to operate on
+ * @iam: new IAM value to use
+ *
+ * Change the IAM atomically with the next alert address and the IPM
+ * of the GISA if the GISA is not part of the GIB alert list. All three
+ * fields are located in the first long word of the GISA.
+ *
+ * Returns: 0 on success
+ * -EBUSY in case the gisa is part of the alert list
+ */
+static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
+{
+ u64 word, _word;
+
+ do {
+ word = READ_ONCE(gisa->u64.word[0]);
+ if ((u64)gisa != word >> 32)
+ return -EBUSY;
+ _word = (word & ~0xffUL) | iam;
+ } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
+
+ return 0;
+}
+
+/**
+ * gisa_clear_ipm - clear the GISA interruption pending mask
+ *
+ * @gisa: gisa to operate on
+ *
+ * Clear the IPM atomically with the next alert address and the IAM
+ * of the GISA unconditionally. All three fields are located in the
+ * first long word of the GISA.
+ */
+static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
+{
+ u64 word, _word;
+
+ do {
+ word = READ_ONCE(gisa->u64.word[0]);
+ _word = word & ~(0xffUL << 24);
+ } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
+}
+
+/**
+ * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
+ *
+ * @gi: gisa interrupt struct to work on
+ *
+ * Atomically restores the interruption alert mask if none of the
+ * relevant ISCs are pending and return the IPM.
+ *
+ * Returns: the relevant pending ISCs
+ */
+static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
+{
+ u8 pending_mask, alert_mask;
+ u64 word, _word;
+
+ do {
+ word = READ_ONCE(gi->origin->u64.word[0]);
+ alert_mask = READ_ONCE(gi->alert.mask);
+ pending_mask = (u8)(word >> 24) & alert_mask;
+ if (pending_mask)
+ return pending_mask;
+ _word = (word & ~0xffUL) | alert_mask;
+ } while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);
+
+ return 0;
+}
+
+static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
+{
+ return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
+}
+
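For orientation, the layout of gisa->u64.word[0] that the cmpxchg() loops above update atomically, inferred from the shifts used in this hunk (word >> 32 for the next-alert address, word >> 24 for the IPM, the low byte for the IAM):

    /* inferred first-quadword layout, LSB bit numbering:
     *   bits 63..32  next_alert -- 31-bit GISA address; points to the
     *                GISA itself while it is not on the GIB alert list
     *   bits 31..24  IPM -- one pending bit per ISC
     *   bits  7..0   IAM -- the interruption alert mask
     * updating all three in a single cmpxchg() is what lets
     * gisa_set_iam() refuse with -EBUSY while the GISA is queued. */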
+static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
-static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
+static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
return READ_ONCE(gisa->ipm);
}
-static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
-static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
@@ -245,8 +329,13 @@ static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
- return pending_irqs_no_gisa(vcpu) |
- kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
+ struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
+ unsigned long pending_mask;
+
+ pending_mask = pending_irqs_no_gisa(vcpu);
+ if (gi->origin)
+ pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
+ return pending_mask;
}
static inline int isc_to_irq_type(unsigned long isc)
@@ -318,13 +407,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
- set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
+ set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
- clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
+ clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
@@ -345,7 +434,7 @@ static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
return;
- else if (psw_ioint_disabled(vcpu))
+ if (psw_ioint_disabled(vcpu))
kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
else
vcpu->arch.sie_block->lctl |= LCTL_CR6;
@@ -353,7 +442,7 @@ static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
- if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
+ if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
return;
if (psw_extint_disabled(vcpu))
kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
@@ -363,7 +452,7 @@ static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
- if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
+ if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
return;
if (psw_mchk_disabled(vcpu))
vcpu->arch.sie_block->ictl |= ICTL_LPSW;
@@ -956,6 +1045,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
{
struct list_head *isc_list;
struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
struct kvm_s390_interrupt_info *inti = NULL;
struct kvm_s390_io_info io;
u32 isc;
@@ -998,8 +1088,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
goto out;
}
- if (vcpu->kvm->arch.gisa &&
- kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
+ if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
/*
* in case an adapter interrupt was not delivered
* in SIE context KVM will handle the delivery
@@ -1089,6 +1178,7 @@ static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
u64 sltime;
vcpu->stat.exit_wait_state++;
@@ -1102,6 +1192,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP; /* disabled wait */
}
+ if (gi->origin &&
+ (gisa_get_ipm_or_restore_iam(gi) &
+ vcpu->arch.sie_block->gcr[6] >> 24))
+ return 0;
+
if (!ckc_interrupts_enabled(vcpu) &&
!cpu_timer_interrupts_enabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
@@ -1533,18 +1628,19 @@ static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
unsigned long active_mask;
int isc;
if (schid)
goto out;
- if (!kvm->arch.gisa)
+ if (!gi->origin)
goto out;
- active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32;
+ active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
while (active_mask) {
isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
- if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc))
+ if (gisa_tac_ipm_gisc(gi->origin, isc))
return isc;
clear_bit_inv(isc, &active_mask);
}
@@ -1567,6 +1663,7 @@ out:
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 isc_mask, u32 schid)
{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_s390_interrupt_info *inti, *tmp_inti;
int isc;
@@ -1584,7 +1681,7 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
/* both types of interrupts present */
if (int_word_to_isc(inti->io.io_int_word) <= isc) {
/* classical IO int with higher priority */
- kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+ gisa_set_ipm_gisc(gi->origin, isc);
goto out;
}
gisa_out:
@@ -1596,7 +1693,7 @@ gisa_out:
kvm_s390_reinject_io_int(kvm, inti);
inti = tmp_inti;
} else
- kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+ gisa_set_ipm_gisc(gi->origin, isc);
out:
return inti;
}
@@ -1685,6 +1782,7 @@ static int __inject_float_mchk(struct kvm *kvm,
static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_s390_float_interrupt *fi;
struct list_head *list;
int isc;
@@ -1692,9 +1790,9 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
kvm->stat.inject_io++;
isc = int_word_to_isc(inti->io.io_int_word);
- if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
+ if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
- kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+ gisa_set_ipm_gisc(gi->origin, isc);
kfree(inti);
return 0;
}
@@ -1726,7 +1824,6 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
*/
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
- struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
struct kvm_vcpu *dst_vcpu;
int sigcpu, online_vcpus, nr_tries = 0;
@@ -1735,11 +1832,11 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
return;
/* find idle VCPUs first, then round robin */
- sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
+ sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
if (sigcpu == online_vcpus) {
do {
- sigcpu = fi->next_rr_cpu;
- fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
+ sigcpu = kvm->arch.float_int.next_rr_cpu++;
+ kvm->arch.float_int.next_rr_cpu %= online_vcpus;
/* avoid endless loops if all vcpus are stopped */
if (nr_tries++ >= online_vcpus)
return;
@@ -1753,7 +1850,8 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa))
+ if (!(type & KVM_S390_INT_IO_AI_MASK &&
+ kvm->arch.gisa_int.origin))
kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
break;
default:
@@ -2003,6 +2101,7 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_s390_interrupt_info *inti;
struct kvm_s390_float_interrupt *fi;
struct kvm_s390_irq *buf;
@@ -2026,15 +2125,14 @@ static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
max_irqs = len / sizeof(struct kvm_s390_irq);
- if (kvm->arch.gisa &&
- kvm_s390_gisa_get_ipm(kvm->arch.gisa)) {
+ if (gi->origin && gisa_get_ipm(gi->origin)) {
for (i = 0; i <= MAX_ISC; i++) {
if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
goto out_nolock;
}
- if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) {
+ if (gisa_tac_ipm_gisc(gi->origin, i)) {
irq = (struct kvm_s390_irq *) &buf[n];
irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
irq->u.io.io_int_word = isc_to_int_word(i);
@@ -2831,7 +2929,7 @@ static void store_local_irq(struct kvm_s390_local_interrupt *li,
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
int scn;
- unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+ DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
unsigned long pending_irqs;
struct kvm_s390_irq irq;
@@ -2884,27 +2982,278 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
return n;
}
-void kvm_s390_gisa_clear(struct kvm *kvm)
+static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
{
- if (kvm->arch.gisa) {
- memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
- kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
- VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
+ int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+ struct kvm_vcpu *vcpu;
+
+ for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
+ vcpu = kvm_get_vcpu(kvm, vcpu_id);
+ if (psw_ioint_disabled(vcpu))
+ continue;
+ deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
+ if (deliverable_mask) {
+ /* recently kicked but not yet running */
+ if (test_and_set_bit(vcpu_id, gi->kicked_mask))
+ return;
+ kvm_s390_vcpu_wakeup(vcpu);
+ return;
+ }
}
}
+static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
+{
+ struct kvm_s390_gisa_interrupt *gi =
+ container_of(timer, struct kvm_s390_gisa_interrupt, timer);
+ struct kvm *kvm =
+ container_of(gi->origin, struct sie_page2, gisa)->kvm;
+ u8 pending_mask;
+
+ pending_mask = gisa_get_ipm_or_restore_iam(gi);
+ if (pending_mask) {
+ __airqs_kick_single_vcpu(kvm, pending_mask);
+ hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
+ return HRTIMER_RESTART;
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+#define NULL_GISA_ADDR 0x00000000UL
+#define NONE_GISA_ADDR 0x00000001UL
+#define GISA_ADDR_MASK 0xfffff000UL
+
+static void process_gib_alert_list(void)
+{
+ struct kvm_s390_gisa_interrupt *gi;
+ struct kvm_s390_gisa *gisa;
+ struct kvm *kvm;
+ u32 final, origin = 0UL;
+
+ do {
+ /*
+ * If the NONE_GISA_ADDR is still stored in the alert list
+ * origin, we will leave the outer loop. No further GISA has
+ * been added to the alert list by millicode while processing
+ * the current alert list.
+ */
+ final = (origin & NONE_GISA_ADDR);
+ /*
+ * Cut off the alert list and store the NONE_GISA_ADDR in the
+ * alert list origin to avoid further GAL interruptions.
+ * A new alert list can be built up by millicode in parallel
+ * for guests not in the just cut-off alert list. When in the
+ * final loop, store the NULL_GISA_ADDR instead. This will re-
+ * enable GAL interruptions on the host again.
+ */
+ origin = xchg(&gib->alert_list_origin,
+ (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
+ /*
+ * Loop through the just cut-off alert list and start the
+ * gisa timers to kick idle vcpus to consume the pending
+ * interruptions asap.
+ */
+ while (origin & GISA_ADDR_MASK) {
+ gisa = (struct kvm_s390_gisa *)(u64)origin;
+ origin = gisa->next_alert;
+ gisa->next_alert = (u32)(u64)gisa;
+ kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
+ gi = &kvm->arch.gisa_int;
+ if (hrtimer_active(&gi->timer))
+ hrtimer_cancel(&gi->timer);
+ hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
+ }
+ } while (!final);
+
+}
+
+void kvm_s390_gisa_clear(struct kvm *kvm)
+{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+ if (!gi->origin)
+ return;
+ gisa_clear_ipm(gi->origin);
+ VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
+}
+
void kvm_s390_gisa_init(struct kvm *kvm)
{
- if (css_general_characteristics.aiv) {
- kvm->arch.gisa = &kvm->arch.sie_page2->gisa;
- VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa);
- kvm_s390_gisa_clear(kvm);
- }
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+ if (!css_general_characteristics.aiv)
+ return;
+ gi->origin = &kvm->arch.sie_page2->gisa;
+ gi->alert.mask = 0;
+ spin_lock_init(&gi->alert.ref_lock);
+ gi->expires = 50 * 1000; /* 50 usec */
+ hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ gi->timer.function = gisa_vcpu_kicker;
+ memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
+ gi->origin->next_alert = (u32)(u64)gi->origin;
+ VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
}
void kvm_s390_gisa_destroy(struct kvm *kvm)
{
- if (!kvm->arch.gisa)
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+ if (!gi->origin)
+ return;
+ if (gi->alert.mask)
+ KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
+ kvm, gi->alert.mask);
+ while (gisa_in_alert_list(gi->origin))
+ cpu_relax();
+ hrtimer_cancel(&gi->timer);
+ gi->origin = NULL;
+}
+
+/**
+ * kvm_s390_gisc_register - register a guest ISC
+ *
+ * @kvm: the kernel vm to work with
+ * @gisc: the guest interruption sub class to register
+ *
+ * The function extends the vm specific alert mask in use.
+ * The effective IAM mask in the GISA is updated as well
+ * in case the GISA is not part of the GIB alert list.
+ * It will be updated at the latest when the IAM gets restored
+ * by gisa_get_ipm_or_restore_iam().
+ *
+ * Returns: the nonspecific ISC (NISC) the gib alert mechanism
+ * has registered with the channel subsystem.
+ * -ENODEV in case the vm uses no GISA
+ * -ERANGE in case the guest ISC is invalid
+ */
+int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
+{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+ if (!gi->origin)
+ return -ENODEV;
+ if (gisc > MAX_ISC)
+ return -ERANGE;
+
+ spin_lock(&gi->alert.ref_lock);
+ gi->alert.ref_count[gisc]++;
+ if (gi->alert.ref_count[gisc] == 1) {
+ gi->alert.mask |= 0x80 >> gisc;
+ gisa_set_iam(gi->origin, gi->alert.mask);
+ }
+ spin_unlock(&gi->alert.ref_lock);
+
+ return gib->nisc;
+}
+EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);
+
+/**
+ * kvm_s390_gisc_unregister - unregister a guest ISC
+ *
+ * @kvm: the kernel vm to work with
+ * @gisc: the guest interruption sub class to unregister
+ *
+ * The function reduces the vm specific alert mask in use.
+ * The effective IAM mask in the GISA is updated as well
+ * in case the GISA is not part of the GIB alert list.
+ * It will be updated at the latest when the IAM gets restored
+ * by gisa_get_ipm_or_restore_iam().
+ *
+ * Returns: 0 in case of success
+ * -ENODEV in case the vm uses no GISA
+ * -ERANGE in case the guest ISC is invalid
+ * -EINVAL in case the guest ISC is not registered
+ */
+int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
+{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+ int rc = 0;
+
+ if (!gi->origin)
+ return -ENODEV;
+ if (gisc > MAX_ISC)
+ return -ERANGE;
+
+ spin_lock(&gi->alert.ref_lock);
+ if (gi->alert.ref_count[gisc] == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+ gi->alert.ref_count[gisc]--;
+ if (gi->alert.ref_count[gisc] == 0) {
+ gi->alert.mask &= ~(0x80 >> gisc);
+ gisa_set_iam(gi->origin, gi->alert.mask);
+ }
+out:
+ spin_unlock(&gi->alert.ref_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);
+
+static void gib_alert_irq_handler(struct airq_struct *airq)
+{
+ inc_irq_stat(IRQIO_GAL);
+ process_gib_alert_list();
+}
+
+static struct airq_struct gib_alert_irq = {
+ .handler = gib_alert_irq_handler,
+ .lsi_ptr = &gib_alert_irq.lsi_mask,
+};
+
+void kvm_s390_gib_destroy(void)
+{
+ if (!gib)
return;
- kvm->arch.gisa = NULL;
+ chsc_sgib(0);
+ unregister_adapter_interrupt(&gib_alert_irq);
+ free_page((unsigned long)gib);
+ gib = NULL;
+}
+
+int kvm_s390_gib_init(u8 nisc)
+{
+ int rc = 0;
+
+ if (!css_general_characteristics.aiv) {
+ KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
+ goto out;
+ }
+
+ gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!gib) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ gib_alert_irq.isc = nisc;
+ if (register_adapter_interrupt(&gib_alert_irq)) {
+ pr_err("Registering the GIB alert interruption handler failed\n");
+ rc = -EIO;
+ goto out_free_gib;
+ }
+
+ gib->nisc = nisc;
+ if (chsc_sgib((u32)(u64)gib)) {
+ pr_err("Associating the GIB with the AIV facility failed\n");
+ free_page((unsigned long)gib);
+ gib = NULL;
+ rc = -EIO;
+ goto out_unreg_gal;
+ }
+
+ KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
+ goto out;
+
+out_unreg_gal:
+ unregister_adapter_interrupt(&gib_alert_irq);
+out_free_gib:
+ free_page((unsigned long)gib);
+ gib = NULL;
+out:
+ return rc;
}
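A hedged usage sketch for the newly exported pair (the caller below is hypothetical; an in-tree user would be a driver forwarding adapter interrupts to a guest):

    int nisc;

    nisc = kvm_s390_gisc_register(kvm, guest_isc);
    if (nisc < 0)
            return nisc;       /* -ENODEV: no GISA; -ERANGE: bad ISC */
    /* ... route the adapter interrupt via the returned host NISC ... */

    /* and symmetrically on teardown: */
    kvm_s390_gisc_unregister(kvm, guest_isc);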
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 7f4bc58a53b9..4638303ba6a8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -432,11 +432,18 @@ int kvm_arch_init(void *opaque)
/* Register floating interrupt controller interface. */
rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
if (rc) {
- pr_err("Failed to register FLIC rc=%d\n", rc);
+ pr_err("A FLIC registration call failed with rc=%d\n", rc);
goto out_debug_unreg;
}
+
+ rc = kvm_s390_gib_init(GAL_ISC);
+ if (rc)
+ goto out_gib_destroy;
+
return 0;
+out_gib_destroy:
+ kvm_s390_gib_destroy();
out_debug_unreg:
debug_unregister(kvm_s390_dbf);
return rc;
@@ -444,6 +451,7 @@ out_debug_unreg:
void kvm_arch_exit(void)
{
+ kvm_s390_gib_destroy();
debug_unregister(kvm_s390_dbf);
}
@@ -1258,11 +1266,65 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
struct kvm_device_attr *attr)
{
- /*
- * Once supported by kernel + hw, we have to store the subfunctions
- * in kvm->arch and remember that user space configured them.
- */
- return -ENXIO;
+ mutex_lock(&kvm->lock);
+ if (kvm->created_vcpus) {
+ mutex_unlock(&kvm->lock);
+ return -EBUSY;
+ }
+
+ if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
+ sizeof(struct kvm_s390_vm_cpu_subfunc))) {
+ mutex_unlock(&kvm->lock);
+ return -EFAULT;
+ }
+ mutex_unlock(&kvm->lock);
+
+ VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
+ VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
+ VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
+ VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
+ VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+
+ return 0;
}
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
@@ -1381,12 +1443,56 @@ static int kvm_s390_get_machine_feat(struct kvm *kvm,
static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
struct kvm_device_attr *attr)
{
- /*
- * Once we can actually configure subfunctions (kernel + hw support),
- * we have to check if they were already set by user space, if so copy
- * them from kvm->arch.
- */
- return -ENXIO;
+ if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
+ sizeof(struct kvm_s390_vm_cpu_subfunc)))
+ return -EFAULT;
+
+ VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
+ VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
+ VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
+ VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
+ VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+
+ return 0;
}
static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
@@ -1395,8 +1501,55 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
sizeof(struct kvm_s390_vm_cpu_subfunc)))
return -EFAULT;
+
+ VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
+ ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
+ ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
+ VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
+ VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
+ VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
+ VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
+ VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
+ VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
+ VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
+
return 0;
}
+
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret = -ENXIO;
@@ -1514,10 +1667,9 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CPU_PROCESSOR_FEAT:
case KVM_S390_VM_CPU_MACHINE_FEAT:
case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
+ case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
ret = 0;
break;
- /* configuring subfunctions is not supported yet */
- case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
default:
ret = -ENXIO;
break;
@@ -2209,6 +2361,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.sie_page2)
goto out_err;
+ kvm->arch.sie_page2->kvm = kvm;
kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
for (i = 0; i < kvm_s390_fac_size(); i++) {
@@ -2218,6 +2371,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
kvm_s390_fac_base[i];
}
+ kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
/* we are always in czam mode - even on pre z14 machines */
set_kvm_facility(kvm->arch.model.fac_mask, 138);
@@ -2812,7 +2966,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->icpua = id;
spin_lock_init(&vcpu->arch.local_int.lock);
- vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa;
+ vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
vcpu->arch.sie_block->gd |= GISA_FORMAT1;
seqcount_init(&vcpu->arch.cputm_seqcount);
@@ -3458,6 +3612,8 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
kvm_s390_patch_guest_per_regs(vcpu);
}
+ clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
+
vcpu->arch.sie_block->icptcode = 0;
cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
@@ -4293,12 +4449,12 @@ static int __init kvm_s390_init(void)
int i;
if (!sclp.has_sief2) {
- pr_info("SIE not available\n");
+ pr_info("SIE is not available\n");
return -ENODEV;
}
if (nested && hpage) {
- pr_info("nested (vSIE) and hpage (huge page backing) can currently not be activated concurrently");
+ pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
return -EINVAL;
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 1f6e36cdce0d..6d9448dbd052 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -67,7 +67,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
- return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
+ return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}
static inline int kvm_is_ucontrol(struct kvm *kvm)
@@ -381,6 +381,8 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
void kvm_s390_gisa_init(struct kvm *kvm);
void kvm_s390_gisa_clear(struct kvm *kvm);
void kvm_s390_gisa_destroy(struct kvm *kvm);
+int kvm_s390_gib_init(u8 nisc);
+void kvm_s390_gib_destroy(void);
/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index bfba273c32c0..71a12a4f4906 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -313,6 +313,9 @@ static void __ref create_core_to_node_map(void)
int i;
emu_cores = memblock_alloc(sizeof(*emu_cores), 8);
+ if (!emu_cores)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*emu_cores), 8);
for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
emu_cores->to_node_id[i] = NODE_ID_FREE;
}
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 2d1271e2a70d..8eb9e9743f5d 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -92,8 +92,12 @@ static void __init numa_setup_memory(void)
} while (cur_base < end_of_dram);
/* Allocate and fill out node_data */
- for (nid = 0; nid < MAX_NUMNODES; nid++)
+ for (nid = 0; nid < MAX_NUMNODES; nid++) {
NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8);
+ if (!NODE_DATA(nid))
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(pg_data_t), 8);
+ }
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss
index 9bba2c14e0ca..cd7e8f4419f5 100644
--- a/arch/s390/scripts/Makefile.chkbss
+++ b/arch/s390/scripts/Makefile.chkbss
@@ -1,23 +1,20 @@
# SPDX-License-Identifier: GPL-2.0
+chkbss-target ?= built-in.a
+$(obj)/$(chkbss-target): chkbss
+
+chkbss-files := $(addsuffix .chkbss, $(chkbss))
+clean-files += $(chkbss-files)
+
+PHONY += chkbss
+chkbss: $(addprefix $(obj)/, $(chkbss-files))
+
quiet_cmd_chkbss = CHKBSS $<
-define cmd_chkbss
- rm -f $@; \
+ cmd_chkbss = \
if ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
echo "error: $< .bss section is not empty" >&2; exit 1; \
fi; \
touch $@;
-endef
-
-chkbss-target ?= $(obj)/built-in.a
-ifneq (,$(findstring /,$(chkbss)))
-chkbss-files := $(patsubst %, %.chkbss, $(chkbss))
-else
-chkbss-files := $(patsubst %, $(obj)/%.chkbss, $(chkbss))
-endif
-
-$(chkbss-target): $(chkbss-files)
-targets += $(notdir $(chkbss-files))
-%.o.chkbss: %.o
+$(obj)/%.o.chkbss: $(obj)/%.o
$(call cmd,chkbss)
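To illustrate what the rule catches (illustrative C, not from this patch): uninitialized file-scope data lands in .bss, which the early boot objects must not contain because they run before the kernel clears the BSS section.

    static int early_state;        /* uninitialized -> .bss, chkbss fails  */
    static int early_mode = 1;     /* initialized   -> .data, chkbss passes */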