Diffstat (limited to 'arch/s390/kernel')
-rw-r--r-- | arch/s390/kernel/fpu.c               |  2
-rw-r--r-- | arch/s390/kernel/nospec-branch.c     |  3
-rw-r--r-- | arch/s390/kernel/perf_cpum_cf_diag.c | 19
-rw-r--r-- | arch/s390/kernel/smp.c               |  3
-rw-r--r-- | arch/s390/kernel/stacktrace.c        |  6
-rw-r--r-- | arch/s390/kernel/syscalls/syscall.tbl|  4
-rw-r--r-- | arch/s390/kernel/vtime.c             | 27
7 files changed, 38 insertions, 26 deletions
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index 594464f2129d..0da378e2eb25 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -23,7 +23,7 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
 
 	if (flags & KERNEL_FPC)
 		/* Save floating point control */
-		asm volatile("stfpc %0" : "=m" (state->fpc));
+		asm volatile("stfpc %0" : "=Q" (state->fpc));
 
 	if (!MACHINE_HAS_VX) {
 		if (flags & KERNEL_VXR_V0V7) {
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index bdddaae96559..649135cbedd5 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/cpu.h>
 #include <asm/nospec-branch.h>
 
 static int __init nobp_setup_early(char *str)
@@ -58,7 +59,7 @@ early_param("nospectre_v2", nospectre_v2_setup_early);
 
 void __init nospec_auto_detect(void)
 {
-	if (test_facility(156)) {
+	if (test_facility(156) || cpu_mitigations_off()) {
 		/*
 		 * The machine supports etokens.
 		 * Disable expolines and disable nobp.
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index c6fad208c2fa..b6854812d2ed 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
  */
 static int __hw_perf_event_init(struct perf_event *event)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	struct perf_event_attr *attr = &event->attr;
+	struct cpu_cf_events *cpuhw;
 	enum cpumf_ctr_set i;
 	int err = 0;
 
-	debug_sprintf_event(cf_diag_dbg, 5,
-			    "%s event %p cpu %d authorized %#x\n", __func__,
-			    event, event->cpu, cpuhw->info.auth_ctl);
+	debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
+			    event, event->cpu);
 
 	event->hw.config = attr->config;
 	event->hw.config_base = 0;
-	local64_set(&event->count, 0);
 
-	/* Add all authorized counter sets to config_base */
+	/* Add all authorized counter sets to config_base. The
+	 * the hardware init function is either called per-cpu or just once
+	 * for all CPUS (event->cpu == -1). This depends on the whether
+	 * counting is started for all CPUs or on a per workload base where
+	 * the perf event moves from one CPU to another CPU.
+	 * Checking the authorization on any CPU is fine as the hardware
+	 * applies the same authorization settings to all CPUs.
+	 */
+	cpuhw = &get_cpu_var(cpu_cf_events);
 	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
 		if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
 			event->hw.config_base |= cpumf_ctr_ctl[i];
+	put_cpu_var(cpu_cf_events);
 
 	/* No authorized counter sets, nothing to count/sample */
 	if (!event->hw.config_base) {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 3fe1c77c361b..bd197baf1dc3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 	lc->percpu_offset = __per_cpu_offset[cpu];
 	lc->kernel_asce = S390_lowcore.kernel_asce;
 	lc->machine_flags = S390_lowcore.machine_flags;
-	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+	lc->user_timer = lc->system_timer =
+		lc->steal_timer = lc->avg_steal_timer = 0;
 	__ctl_store(lc->cregs_save_area, 0, 15);
 	save_access_regs((unsigned int *) lc->access_regs_save_area);
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 460dcfba7d4e..cc9ed9787068 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -45,8 +45,6 @@ void save_stack_trace(struct stack_trace *trace)
 
 	sp = current_stack_pointer();
 	dump_trace(save_address, trace, NULL, sp);
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
@@ -58,8 +56,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	if (tsk == current)
 		sp = current_stack_pointer();
 	dump_trace(save_address_nosched, trace, tsk, sp);
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
 
@@ -69,7 +65,5 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 
 	sp = kernel_stack_pointer(regs);
 	dump_trace(save_address, trace, NULL, sp);
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_regs);
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 02579f95f391..061418f787c3 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -426,3 +426,7 @@
 421	32	rt_sigtimedwait_time64	-	compat_sys_rt_sigtimedwait_time64
 422	32	futex_time64	-	sys_futex
 423	32	sched_rr_get_interval_time64	-	sys_sched_rr_get_interval
+424	common	pidfd_send_signal	sys_pidfd_send_signal	sys_pidfd_send_signal
+425	common	io_uring_setup	sys_io_uring_setup	sys_io_uring_setup
+426	common	io_uring_enter	sys_io_uring_enter	sys_io_uring_enter
+427	common	io_uring_register	sys_io_uring_register	sys_io_uring_register
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 98f850e00008..c475ca49cfc6 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -37,7 +37,7 @@ static inline u64 get_vtimer(void)
 {
 	u64 timer;
 
-	asm volatile("stpt %0" : "=m" (timer));
+	asm volatile("stpt %0" : "=Q" (timer));
 	return timer;
 }
 
@@ -48,7 +48,7 @@ static inline void set_vtimer(u64 expires)
 	asm volatile(
 		"	stpt	%0\n"	/* Store current cpu timer value */
 		"	spt	%1"	/* Set new value imm. afterwards */
-		: "=m" (timer) : "m" (expires));
+		: "=Q" (timer) : "Q" (expires));
 	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
 	S390_lowcore.last_update_timer = expires;
 }
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
  */
 static int do_account_vtime(struct task_struct *tsk)
 {
-	u64 timer, clock, user, guest, system, hardirq, softirq, steal;
+	u64 timer, clock, user, guest, system, hardirq, softirq;
 
 	timer = S390_lowcore.last_update_timer;
 	clock = S390_lowcore.last_update_clock;
@@ -135,8 +135,8 @@ static int do_account_vtime(struct task_struct *tsk)
 #else
 		"	stck	%1"	/* Store current tod clock value */
 #endif
-		: "=m" (S390_lowcore.last_update_timer),
-		  "=m" (S390_lowcore.last_update_clock));
+		: "=Q" (S390_lowcore.last_update_timer),
+		  "=Q" (S390_lowcore.last_update_clock));
 	clock = S390_lowcore.last_update_clock - clock;
 	timer -= S390_lowcore.last_update_timer;
 
@@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
 	if (softirq)
 		account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
 
-	steal = S390_lowcore.steal_timer;
-	if ((s64) steal > 0) {
-		S390_lowcore.steal_timer = 0;
-		account_steal_time(cputime_to_nsecs(steal));
-	}
-
 	return virt_timer_forward(user + guest + system + hardirq + softirq);
 }
 
@@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
  */
 void vtime_flush(struct task_struct *tsk)
 {
+	u64 steal, avg_steal;
+
 	if (do_account_vtime(tsk))
 		virt_timer_expire();
+
+	steal = S390_lowcore.steal_timer;
+	avg_steal = S390_lowcore.avg_steal_timer / 2;
+	if ((s64) steal > 0) {
+		S390_lowcore.steal_timer = 0;
+		account_steal_time(steal);
+		avg_steal += steal;
+	}
+	S390_lowcore.avg_steal_timer = avg_steal;
 }
 
 /*
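
A note on the vtime.c change above: steal-time accounting moves from do_account_vtime() into vtime_flush(), and the new avg_steal_timer lowcore field keeps a decaying average of the observed steal time. On every flush the previous average is halved and, if a positive steal sample was seen, that sample is added on top, so each older sample loses half its weight per flush (an exponential moving average with factor 1/2). The user-space sketch below only models that arithmetic; the sample values and the printf reporting are invented for illustration, and the real kernel code additionally clears S390_lowcore.steal_timer and feeds the sample to account_steal_time().

#include <stdio.h>

int main(void)
{
	/* Invented steal-time samples, one per simulated vtime_flush() call. */
	unsigned long long steal_samples[] = { 1000, 0, 400, 400, 0, 0 };
	unsigned long long avg_steal = 0;
	unsigned int i;

	for (i = 0; i < sizeof(steal_samples) / sizeof(steal_samples[0]); i++) {
		unsigned long long steal = steal_samples[i];
		unsigned long long avg = avg_steal / 2;	/* halve the previous average */

		if ((long long) steal > 0)
			avg += steal;	/* fold in the new sample (kernel also accounts it) */
		avg_steal = avg;	/* corresponds to S390_lowcore.avg_steal_timer */
		printf("flush %u: steal=%llu avg_steal=%llu\n", i, steal, avg_steal);
	}
	return 0;
}

With this weighting a sample taken n flushes ago contributes steal/2^n to the current value, so avg_steal_timer tracks recent steal behaviour without any per-sample history being stored.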