From 524b24ada792b40e1eb2eae59f7a096f26c3b788 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 4 Jun 2012 12:11:41 +0200
Subject: s390/smp: remove redundant check

Condition code "status stored" for sigp sense running always implies
that only the "not running" status bit is set. Therefore there is no
need to check whether it is set.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/smp.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/s390/kernel/smp.c')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 15cca26ccb6c..c78074c6cc1d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -158,8 +158,8 @@ static inline int pcpu_running(struct pcpu *pcpu)
         if (__pcpu_sigp(pcpu->address, sigp_sense_running,
                         0, &pcpu->status) != sigp_status_stored)
                 return 1;
-        /* Check for running status */
-        return !(pcpu->status & 0x400);
+        /* Status stored condition code is equivalent to cpu not running. */
+        return 0;
 }
 
 /*
--
cgit v1.2.1

From a9ae32c3d9a6557f24db0e186bf2f84205780b8a Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 4 Jun 2012 12:55:15 +0200
Subject: s390/smp/kvm: unify sigp definitions

The smp and the kvm code have different defines for the sigp order
codes. Let's just have a single place where these are defined. Also
move the sigp condition code and sigp cpu status bits to the new
sigp.h header file.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/smp.c | 72 +++++++++++++++++---------------------------------
 1 file changed, 24 insertions(+), 48 deletions(-)

(limited to 'arch/s390/kernel/smp.c')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c78074c6cc1d..6e4047e4b498 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -44,33 +44,9 @@
 #include
 #include
 #include
+#include
 #include "entry.h"
 
-enum {
-        sigp_sense = 1,
-        sigp_external_call = 2,
-        sigp_emergency_signal = 3,
-        sigp_start = 4,
-        sigp_stop = 5,
-        sigp_restart = 6,
-        sigp_stop_and_store_status = 9,
-        sigp_initial_cpu_reset = 11,
-        sigp_cpu_reset = 12,
-        sigp_set_prefix = 13,
-        sigp_store_status_at_address = 14,
-        sigp_store_extended_status_at_address = 15,
-        sigp_set_architecture = 18,
-        sigp_conditional_emergency_signal = 19,
-        sigp_sense_running = 21,
-};
-
-enum {
-        sigp_order_code_accepted = 0,
-        sigp_status_stored = 1,
-        sigp_busy = 2,
-        sigp_not_operational = 3,
-};
-
 enum {
         ec_schedule = 0,
         ec_call_function,
@@ -124,7 +100,7 @@ static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
 
         while (1) {
                 cc = __pcpu_sigp(addr, order, parm, status);
-                if (cc != sigp_busy)
+                if (cc != SIGP_CC_BUSY)
                         return cc;
                 cpu_relax();
         }
@@ -136,7 +112,7 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
         for (retry = 0; ; retry++) {
                 cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
-                if (cc != sigp_busy)
+                if (cc != SIGP_CC_BUSY)
                         break;
                 if (retry >= 3)
                         udelay(10);
         }
@@ -146,8 +122,8 @@ static inline int pcpu_stopped(struct pcpu *pcpu)
 {
-        if (__pcpu_sigp(pcpu->address, sigp_sense,
-                        0, &pcpu->status) != sigp_status_stored)
+        if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
+                        0, &pcpu->status) != SIGP_CC_STATUS_STORED)
                 return 0;
         /* Check for stopped and check stop state */
         return !!(pcpu->status & 0x50);
 }
 
@@ -155,8 +131,8 @@ static inline int pcpu_running(struct pcpu *pcpu)
 {
-        if (__pcpu_sigp(pcpu->address, sigp_sense_running,
-                        0, &pcpu->status) != sigp_status_stored)
+        if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
+                        0, &pcpu->status) != SIGP_CC_STATUS_STORED)
                 return 1;
         /* Status stored condition code is equivalent to cpu not running. */
         return 0;
 }
 
@@ -181,7 +157,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
 
         set_bit(ec_bit, &pcpu->ec_mask);
         order = pcpu_running(pcpu) ?
-                sigp_external_call : sigp_emergency_signal;
+                SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
         pcpu_sigp_retry(pcpu, order, 0);
 }
 
@@ -214,7 +190,7 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
                         goto out;
 #endif
         lowcore_ptr[cpu] = lc;
-        pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
+        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
         return 0;
 out:
         if (pcpu != &pcpu_devices[0]) {
@@ -229,7 +205,7 @@ out:
 
 static void pcpu_free_lowcore(struct pcpu *pcpu)
 {
-        pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
+        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
         lowcore_ptr[pcpu - pcpu_devices] = NULL;
 #ifndef CONFIG_64BIT
         if (MACHINE_HAS_IEEE) {
@@ -288,7 +264,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
         lc->restart_fn = (unsigned long) func;
         lc->restart_data = (unsigned long) data;
         lc->restart_source = -1UL;
-        pcpu_sigp_retry(pcpu, sigp_restart, 0);
+        pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
 }
 
 /*
@@ -309,7 +285,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
         if (pcpu->address == restart.source)
                 func(data); /* should not return */
         /* Stop target cpu (if func returns this stops the current cpu). */
-        pcpu_sigp_retry(pcpu, sigp_stop, 0);
+        pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
         /* Restart func on the target cpu and stop the current cpu. */
         memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
         asm volatile(
@@ -388,8 +364,8 @@ void smp_emergency_stop(cpumask_t *cpumask)
         for_each_cpu(cpu, cpumask) {
                 struct pcpu *pcpu = pcpu_devices + cpu;
                 set_bit(ec_stop_cpu, &pcpu->ec_mask);
-                while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
-                                   0, NULL) == sigp_busy &&
+                while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
+                                   0, NULL) == SIGP_CC_BUSY &&
                        get_clock() < end)
                         cpu_relax();
         }
@@ -425,7 +401,7 @@ void smp_send_stop(void)
         /* stop all processors */
         for_each_cpu(cpu, &cpumask) {
                 struct pcpu *pcpu = pcpu_devices + cpu;
-                pcpu_sigp_retry(pcpu, sigp_stop, 0);
+                pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
                 while (!pcpu_stopped(pcpu))
                         cpu_relax();
         }
@@ -436,7 +412,7 @@ void smp_send_stop(void)
  */
 void smp_stop_cpu(void)
 {
-        pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
+        pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
         for (;;) ;
 }
 
@@ -590,7 +566,7 @@ static void __init smp_get_save_area(int cpu, u16 address)
         }
 #endif
         /* Get the registers of a non-boot cpu. */
-        __pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
+        __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
         memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
 }
 
@@ -599,8 +575,8 @@ int smp_store_status(int cpu)
         struct pcpu *pcpu;
 
         pcpu = pcpu_devices + cpu;
-        if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
-                              0, NULL) != sigp_order_code_accepted)
+        if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
+                              0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
                 return -EIO;
         return 0;
 }
@@ -621,8 +597,8 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
         if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
                 use_sigp_detection = 1;
                 for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
-                        if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
-                            sigp_not_operational)
+                        if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
+                            SIGP_CC_NOT_OPERATIONAL)
                                 continue;
                         info->cpu[info->configured].address = address;
                         info->configured++;
@@ -734,8 +710,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
         pcpu = pcpu_devices + cpu;
         if (pcpu->state != CPU_STATE_CONFIGURED)
                 return -EIO;
-        if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
-            sigp_order_code_accepted)
+        if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
+            SIGP_CC_ORDER_CODE_ACCEPTED)
                 return -EIO;
         rc = pcpu_alloc_lowcore(pcpu, cpu);
 
@@ -795,7 +771,7 @@ void __cpu_die(unsigned int cpu)
 
 void __noreturn cpu_die(void)
 {
         idle_task_exit();
-        pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
+        pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
         for (;;) ;
 }
--
cgit v1.2.1

From a095a8a9d5c2ffa15589298aabb64c75c39bf9be Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 4 Jun 2012 14:07:47 +0200
Subject: s390/smp: use sigp cpu status definitions

We got them from the kvm code, so let's use them.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/smp.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'arch/s390/kernel/smp.c')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6e4047e4b498..53ac2344ca7a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -125,8 +125,7 @@ static inline int pcpu_stopped(struct pcpu *pcpu)
         if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
                         0, &pcpu->status) != SIGP_CC_STATUS_STORED)
                 return 0;
-        /* Check for stopped and check stop state */
-        return !!(pcpu->status & 0x50);
+        return !!(pcpu->status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
 }
 
 static inline int pcpu_running(struct pcpu *pcpu)
--
cgit v1.2.1

From eb546195a7d8bc492ec6865980bf767474e74d87 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 4 Jun 2012 15:05:43 +0200
Subject: s390/sigp: use sigp order code defines in assembly code

Use sigp order code defines in assembly code as well. With this change
all places that use sigp constants should have been converted to use
self-describing defines instead of directly using constants.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/smp.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'arch/s390/kernel/smp.c')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 53ac2344ca7a..e01408429ad6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -288,11 +288,13 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
         /* Restart func on the target cpu and stop the current cpu. */
         memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
         asm volatile(
-                "0: sigp 0,%0,6 # sigp restart to target cpu\n"
+                "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
                 " brc 2,0b # busy, try again\n"
-                "1: sigp 0,%1,5 # sigp stop to current cpu\n"
+                "1: sigp 0,%1,%3 # sigp stop to current cpu\n"
                 " brc 2,1b # busy, try again\n"
-                : : "d" (pcpu->address), "d" (restart.source) : "0", "1", "cc");
+                : : "d" (pcpu->address), "d" (restart.source),
+                  "K" (SIGP_RESTART), "K" (SIGP_STOP)
+                : "0", "1", "cc");
         for (;;) ;
 }
 
--
cgit v1.2.1

From fbe765680d1fe9d08187ea4dad5041a7955a2c3a Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Tue, 5 Jun 2012 09:59:52 +0200
Subject: s390/smp: make absolute lowcore / cpu restart parameter accesses more robust

Setting the cpu restart parameters is done in three different fashions:
- directly setting the four parameters individually
- copying the four parameters with memcpy (using 4 * sizeof(long))
- copying the four parameters using a private structure

In addition, code in entry*.S relies on a certain order of the restart
members of struct _lowcore.

Make all of this more robust to future changes by adding a
mem_assign_absolute(dest, val) define, which assigns val to dest using
absolute addressing mode. Also the load multiple instructions in
entry*.S have been split into separate load instructions so the order
of the struct _lowcore members doesn't matter anymore.

In addition move the prototypes of memcpy_real/absolute from uaccess.h
to processor.h. These memcpy* variants are not related to uaccess at
all. string.h doesn't seem to match as well, so let's use processor.h.

Also replace the eight byte array in struct _lowcore which represents
a misaligned u64 with a u64. The compiler will always create code that
handles the misaligned u64 correctly.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/smp.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

(limited to 'arch/s390/kernel/smp.c')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index e01408429ad6..dc602a61233f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -273,26 +273,24 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
                           void *data, unsigned long stack)
 {
         struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
-        struct {
-                unsigned long stack;
-                void *func;
-                void *data;
-                unsigned long source;
-        } restart = { stack, func, data, stap() };
+        unsigned long source_cpu = stap();
 
         __load_psw_mask(psw_kernel_bits);
-        if (pcpu->address == restart.source)
+        if (pcpu->address == source_cpu)
                 func(data); /* should not return */
         /* Stop target cpu (if func returns this stops the current cpu). */
         pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
         /* Restart func on the target cpu and stop the current cpu. */
-        memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
+        mem_assign_absolute(lc->restart_stack, stack);
+        mem_assign_absolute(lc->restart_fn, (unsigned long) func);
+        mem_assign_absolute(lc->restart_data, (unsigned long) data);
+        mem_assign_absolute(lc->restart_source, source_cpu);
         asm volatile(
                 "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
                 " brc 2,0b # busy, try again\n"
                 "1: sigp 0,%1,%3 # sigp stop to current cpu\n"
                 " brc 2,1b # busy, try again\n"
-                : : "d" (pcpu->address), "d" (restart.source),
+                : : "d" (pcpu->address), "d" (source_cpu),
                   "K" (SIGP_RESTART), "K" (SIGP_STOP)
                 : "0", "1", "cc");
         for (;;) ;
--
cgit v1.2.1

From 0008204ffe85d23382d6fd0f971f3f0fbe70bae2 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 13 Jul 2012 15:45:33 +0200
Subject: s390/idle: fix sequence handling vs cpu hotplug

The s390 idle accounting code uses a sequence counter which gets used
when the per cpu idle statistics get updated and read. One assumption
on read access is that the result is only valid if the sequence counter
is even and did not change while all values were read.

On cpu hotplug however the per cpu data structure gets initialized via
a cpu hotplug notifier on CPU_ONLINE. CPU_ONLINE however is too late,
since the onlined cpu is already running and might access the per cpu
data. Worst case is that the data structure gets initialized while an
idle thread is updating its idle statistics. This will result in an odd
sequence counter after an update.

As a result, user space tools like top, which access /proc/stat in
order to get idle stats, will busy loop waiting for the sequence
counter to become even again, which will never happen until the queried
cpu updates its idle statistics again. And even then the sequence
counter will only have an even value for a couple of cpu cycles.

Fix this by moving the initialization of the per cpu idle statistics
to cpu_init(). I prefer this solution over changing the notifier to
CPU_UP_PREPARE, which would be a different solution to the problem.

Cc: stable@vger.kernel.org
Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/smp.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'arch/s390/kernel/smp.c')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index dc602a61233f..22257f241d07 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -959,14 +959,11 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
         unsigned int cpu = (unsigned int)(long)hcpu;
         struct cpu *c = &pcpu_devices[cpu].cpu;
         struct device *s = &c->dev;
-        struct s390_idle_data *idle;
         int err = 0;
 
         switch (action) {
         case CPU_ONLINE:
         case CPU_ONLINE_FROZEN:
-                idle = &per_cpu(s390_idle, cpu);
-                memset(idle, 0, sizeof(struct s390_idle_data));
                 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
                 break;
         case CPU_DEAD:
--
cgit v1.2.1

From a53c8fab3f87c995c30ac226a03af95361243144 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 20 Jul 2012 11:15:04 +0200
Subject: s390/comments: unify copyright messages and remove file names

Remove the file name from the comment at the top of many files. In most
cases the file name was wrong anyway, so it's rather pointless.

Also unify the IBM copyright statement. We did have a lot of slightly
different statements and wanted to change them one after another
whenever a file got touched. However that never happened. Instead
people started to take the old/"wrong" statements as a template for new
files. So unify all of them in one go.

Signed-off-by: Heiko Carstens
---
 arch/s390/kernel/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/s390/kernel/smp.c')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 22257f241d07..eeb441bbddae 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,7 +1,7 @@
 /*
  * SMP related functions
  *
- * Copyright IBM Corp. 1999,2012
+ * Copyright IBM Corp. 1999, 2012
  * Author(s): Denis Joseph Barrow,
  *            Martin Schwidefsky ,
  *            Heiko Carstens ,
--
cgit v1.2.1

From 27f6b416626a240e1b46f646d2e0c5266f4eac95 Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Fri, 20 Jul 2012 11:15:08 +0200
Subject: s390/vtimer: rework virtual timer interface

The current virtual timer interface is inherently per-cpu and hard to
use. The sole user of the interface is appldata which uses it to
execute a function after a specific amount of cputime has been used
over all cpus.

Rework the virtual timer interface to hook into the cputime accounting.
This makes the interface independent from the CPU timer interrupts, and
makes the virtual timers global as opposed to per-cpu. Overall the code
is greatly simplified. The downside is that the accuracy is not as good
as the original implementation, but it is still good enough for
appldata.

Reviewed-by: Jan Glauber
Reviewed-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/smp.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/s390/kernel/smp.c')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index eeb441bbddae..5481da80926a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -38,7 +38,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -917,7 +917,7 @@ static ssize_t show_idle_count(struct device *dev,
         do {
                 sequence = ACCESS_ONCE(idle->sequence);
                 idle_count = ACCESS_ONCE(idle->idle_count);
-                if (ACCESS_ONCE(idle->idle_enter))
+                if (ACCESS_ONCE(idle->clock_idle_enter))
                         idle_count++;
         } while ((sequence & 1) || (idle->sequence != sequence));
         return sprintf(buf, "%llu\n", idle_count);
@@ -935,8 +935,8 @@ static ssize_t show_idle_time(struct device *dev,
                 now = get_clock();
                 sequence = ACCESS_ONCE(idle->sequence);
                 idle_time = ACCESS_ONCE(idle->idle_time);
-                idle_enter = ACCESS_ONCE(idle->idle_enter);
-                idle_exit = ACCESS_ONCE(idle->idle_exit);
+                idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+                idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
         } while ((sequence & 1) || (idle->sequence != sequence));
         idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
         return sprintf(buf, "%llu\n", idle_time >> 12);
--
cgit v1.2.1
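
Note on the sequence counter protocol: the read-side convention described in the
"s390/idle: fix sequence handling vs cpu hotplug" message above (readers retry
until the sequence counter is even and unchanged across the whole read) can be
illustrated with a short, self-contained sketch. This is a hypothetical
user-space model, not the kernel implementation: the struct layout, the
function name and the volatile casts (standing in for the kernel's
ACCESS_ONCE()) are assumptions made for illustration only.

#include <stdint.h>

/* Simplified stand-in for the per cpu idle statistics. */
struct idle_data {
        unsigned int sequence;          /* even: stable, odd: update in progress */
        uint64_t idle_count;
        uint64_t clock_idle_enter;      /* non-zero while the cpu is idle */
};

/*
 * Lockless read: retry until the sequence counter is even and did not
 * change while the values were read.  If an update ever leaves the
 * counter odd (the race the patch fixes), this loop spins forever,
 * which is the /proc/stat busy loop described in the commit message.
 */
uint64_t read_idle_count(const struct idle_data *idle)
{
        unsigned int sequence;
        uint64_t idle_count;

        do {
                sequence = *(volatile const unsigned int *)&idle->sequence;
                idle_count = *(volatile const uint64_t *)&idle->idle_count;
                if (*(volatile const uint64_t *)&idle->clock_idle_enter)
                        idle_count++;   /* count the still-open idle period */
        } while ((sequence & 1) ||
                 *(volatile const unsigned int *)&idle->sequence != sequence);

        return idle_count;
}

A writer is expected to bump the counter to an odd value before updating the
statistics and back to an even value afterwards; re-initializing the structure
concurrently, as could happen with the CPU_ONLINE notifier, breaks that
protocol, which is why the initialization was moved to cpu_init().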