From 3b885787ea4112eaa80945999ea0901bf742707f Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Mon, 12 Oct 2009 13:26:31 -0700 Subject: net: Generalize socket rx gap / receive queue overflow cmsg Create a new socket level option to report number of queue overflows Recently I augmented the AF_PACKET protocol to report the number of frames lost on the socket receive queue between any two enqueued frames. This value was exported via a SOL_PACKET level cmsg. AFter I completed that work it was requested that this feature be generalized so that any datagram oriented socket could make use of this option. As such I've created this patch, It creates a new SOL_SOCKET level option called SO_RXQ_OVFL, which when enabled exports a SOL_SOCKET level cmsg that reports the nubmer of times the sk_receive_queue overflowed between any two given frames. It also augments the AF_PACKET protocol to take advantage of this new feature (as it previously did not touch sk->sk_drops, which this patch uses to record the overflow count). Tested successfully by me. Notes: 1) Unlike my previous patch, this patch simply records the sk_drops value, which is not a number of drops between packets, but rather a total number of drops. Deltas must be computed in user space. 2) While this patch currently works with datagram oriented protocols, it will also be accepted by non-datagram oriented protocols. I'm not sure if thats agreeable to everyone, but my argument in favor of doing so is that, for those protocols which aren't applicable to this option, sk_drops will always be zero, and reporting no drops on a receive queue that isn't used for those non-participating protocols seems reasonable to me. This also saves us having to code in a per-protocol opt in mechanism. 3) This applies cleanly to net-next assuming that commit 977750076d98c7ff6cbda51858bb5a5894a9d9ab (my af packet cmsg patch) is reverted Signed-off-by: Neil Horman Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- arch/s390/include/asm/socket.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/socket.h b/arch/s390/include/asm/socket.h index e42df89a0b85..fdff1e995c73 100644 --- a/arch/s390/include/asm/socket.h +++ b/arch/s390/include/asm/socket.h @@ -68,4 +68,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_SOCKET_H */ -- cgit v1.2.1 From c44fc770845163f8d9e573f37f92a7b7a7ade14e Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 19 Sep 2009 06:50:42 +0200 Subject: tracing: Move syscalls metadata handling from arch to core Most of the syscalls metadata processing is done from arch. But these operations are mostly generic accross archs. Especially now that we have a common variable name that expresses the number of syscalls supported by an arch: NR_syscalls, the only remaining bits that need to reside in arch is the syscall nr to addr translation. v2: Compare syscalls symbols only after the "sys" prefix so that we avoid spurious mismatches with archs that have syscalls wrappers, in which case syscalls symbols have "SyS" prefixed aliases. 
(Reported by: Heiko Carstens) Signed-off-by: Frederic Weisbecker Acked-by: Heiko Carstens Cc: Ingo Molnar Cc: Steven Rostedt Cc: Li Zefan Cc: Masami Hiramatsu Cc: Jason Baron Cc: Lai Jiangshan Cc: Martin Schwidefsky Cc: Paul Mundt --- arch/s390/kernel/ftrace.c | 67 ++--------------------------------------------- 1 file changed, 2 insertions(+), 65 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 57bdcb1e3cdf..7c5752c3423d 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -206,73 +206,10 @@ out: #ifdef CONFIG_FTRACE_SYSCALLS -extern unsigned long __start_syscalls_metadata[]; -extern unsigned long __stop_syscalls_metadata[]; extern unsigned int sys_call_table[]; -static struct syscall_metadata **syscalls_metadata; - -struct syscall_metadata *syscall_nr_to_meta(int nr) -{ - if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) - return NULL; - - return syscalls_metadata[nr]; -} - -int syscall_name_to_nr(char *name) -{ - int i; - - if (!syscalls_metadata) - return -1; - for (i = 0; i < NR_syscalls; i++) - if (syscalls_metadata[i]) - if (!strcmp(syscalls_metadata[i]->name, name)) - return i; - return -1; -} - -void set_syscall_enter_id(int num, int id) -{ - syscalls_metadata[num]->enter_id = id; -} - -void set_syscall_exit_id(int num, int id) +unsigned long __init arch_syscall_addr(int nr) { - syscalls_metadata[num]->exit_id = id; -} - -static struct syscall_metadata *find_syscall_meta(unsigned long syscall) -{ - struct syscall_metadata *start; - struct syscall_metadata *stop; - char str[KSYM_SYMBOL_LEN]; - - start = (struct syscall_metadata *)__start_syscalls_metadata; - stop = (struct syscall_metadata *)__stop_syscalls_metadata; - kallsyms_lookup(syscall, NULL, NULL, NULL, str); - - for ( ; start < stop; start++) { - if (start->name && !strcmp(start->name + 3, str + 3)) - return start; - } - return NULL; -} - -static int __init arch_init_ftrace_syscalls(void) -{ - struct syscall_metadata *meta; - int i; - syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, - GFP_KERNEL); - if (!syscalls_metadata) - return -ENOMEM; - for (i = 0; i < NR_syscalls; i++) { - meta = find_syscall_meta((unsigned long)sys_call_table[i]); - syscalls_metadata[i] = meta; - } - return 0; + return (unsigned long)sys_call_table[nr]; } -arch_initcall(arch_init_ftrace_syscalls); #endif -- cgit v1.2.1 From 7874b1b66a53c4d9c8dcb37884cbb758aa2d712c Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Wed, 14 Oct 2009 12:43:44 +0200 Subject: [S390] hypfs: Use subcode 6 if subcode 7 is not available Hypfs never worked on systems that only provide D204 subcode 6. In these cases we nevertheless used subcode 7. With this fix, we use subcode 6, if it is available and the system does not provide subcode 7. 
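
For illustration only, a simplified sketch of the probe order after this fix (not the in-tree code; diag204_call() and the SUBC_STIB6/SUBC_STIB7 constants are placeholders standing in for the helpers in arch/s390/hypfs/hypfs_diag.c): subcode 7 is still preferred, but when only subcode 6 is offered the probe now records subcode 6 so that later data collection uses the subcode that actually succeeded.

/* Hedged sketch only: constants and diag204_call() are placeholders. */
enum { SUBC_STIB6 = 6, SUBC_STIB7 = 7 };

static int diag204_store_sc;			/* subcode used for later queries */

static int diag204_probe_sketch(int (*diag204_call)(int subcode, void *buf),
				void *buf)
{
	if (diag204_call(SUBC_STIB7, buf) >= 0) {
		diag204_store_sc = SUBC_STIB7;	/* subcode 7 is available */
		return 0;
	}
	if (diag204_call(SUBC_STIB6, buf) >= 0) {
		diag204_store_sc = SUBC_STIB6;	/* fall back to subcode 6 */
		return 0;
	}
	return -1;				/* neither extended subcode works */
}
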
Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/hypfs/hypfs_diag.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index 704dd396257b..77df726180ba 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -438,7 +438,7 @@ static int diag204_probe(void) } if (diag204((unsigned long)SUBC_STIB6 | (unsigned long)INFO_EXT, pages, buf) >= 0) { - diag204_store_sc = SUBC_STIB7; + diag204_store_sc = SUBC_STIB6; diag204_info_type = INFO_EXT; goto out; } -- cgit v1.2.1 From be6e3f9cd639fec5882fca16e058843c3064c6c9 Mon Sep 17 00:00:00 2001 From: Andreas Krebbel Date: Wed, 14 Oct 2009 12:43:47 +0200 Subject: [S390] Add highgprs facility to /proc/cpuinfo This patch makes the hwcap bit for the high gprs feature to be visible in /proc/cpuinfo. Signed-off-by: Andreas Krebbel Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/processor.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 802c8ab247f3..0729f36c2fe3 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -31,9 +31,9 @@ void __cpuinit print_cpu_info(void) static int show_cpuinfo(struct seq_file *m, void *v) { - static const char *hwcap_str[9] = { + static const char *hwcap_str[10] = { "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", - "edat", "etf3eh" + "edat", "etf3eh", "highgprs" }; struct _lowcore *lc; unsigned long n = (unsigned long) v - 1; @@ -48,7 +48,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) num_online_cpus(), loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ))%100); seq_puts(m, "features\t: "); - for (i = 0; i < 9; i++) + for (i = 0; i < 10; i++) if (hwcap_str[i] && (elf_hwcap & (1UL << i))) seq_printf(m, "%s ", hwcap_str[i]); seq_puts(m, "\n"); -- cgit v1.2.1 From ca1b82ba0888e742a7efdb89ed8e2aab453e091f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 10 Oct 2009 10:21:03 +0200 Subject: s390: Remove BKL from prng cycle_kernel_lock() was added during the big BKL pushdown. It should ensure the serializiation against driver init code. In this case there is nothing to serialize. Remove it. Signed-off-by: Thomas Gleixner Cc: Martin Schwidefsky LKML-Reference: <20091010153349.601625576@linutronix.de> Acked-by: Jan Glauber --- arch/s390/crypto/prng.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index b49c00ce65e9..a3209906739e 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -49,7 +48,6 @@ static unsigned char parm_block[32] = { static int prng_open(struct inode *inode, struct file *file) { - cycle_kernel_lock(); return nonseekable_open(inode, file); } -- cgit v1.2.1 From 4f8048ee734dab7c463574797b820c0c68c80791 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 29 Oct 2009 15:04:09 +0100 Subject: [S390] smp: fix prefix handling of offlined cpus Offlined cpus still have valid prefix register contents. Dumpers will store the register contents of a cpu to the location where its prefix register points to. For offlined cpus the area (lowcore) has been freed and the dumper would write the uninteresting contents of the offline cpu to a memory location which might be in use by some other component and destroy valueable information. 
To fix this set the prefix register of offline cpus to absolute address zero again. This prevents the current dumpers to write to random memory locations. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/smp.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index c932caa5e850..c699ac538c49 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -638,6 +638,8 @@ void __cpu_die(unsigned int cpu) /* Wait until target cpu is down */ while (!cpu_stopped(cpu)) cpu_relax(); + while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy) + udelay(10); smp_free_lowcore(cpu); pr_info("Processor %d stopped\n", cpu); } -- cgit v1.2.1 From 70f5dc514c0b183ee813dc3b3983b04891fd1e7a Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 29 Oct 2009 15:04:12 +0100 Subject: [S390] cputime: fix overflow on 31 bit systems The cputime_to_msecs / cputime_to_clock_t and cputime64_to_clock_t cause fixpoint divide exceptions if the cputime is too large. On a machine that collected 49.7 days worth of idle time reading from /proc/stat will generate oopses like this: Kernel BUG at 001b0c92 [verbose debug info unavailable] fixpoint divide exception: 0009 [#13] SMP Modules linked in: ipv6 CPU: 1 Tainted: G D 2.6.27.10 #5 Process cat (pid: 21352, task: 1fb34138, ksp: 1d2a3d98) Krnl PSW : 070c2000 801b0c92 (show_stat+0x2ca/0x68c) R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:0 CC:2 PM:0 Krnl GPRS: 00000001 00001388 00000bb8 0015d2a1 00000000 00000000 000003e8 0001fd91 00000000 00000000 0000129d eecd2ff0 1cc533b9 0036f780 801b0bce 1d2a3cc0 Krnl Code: 801b0c86: f18890abf198 mvo 171(9,%r9),408(9,%r15) 801b0c8c: 98abf170 lm %r10,%r11,368(%r15) 801b0c90: 1da1 dr %r10,%r1 >801b0c92: 90abf170 stm %r10,%r11,368(%r15) 801b0c96: 98abf190 lm %r10,%r11,400(%r15) 801b0c9a: 1da1 dr %r10,%r1 801b0c9c: 90abf190 stm %r10,%r11,400(%r15) 801b0ca0: 18a3 lr %r10,%r3 Call Trace: ([<00000000001b09f4>] show_stat+0x2c/0x68c) [<000000000018dcee>] seq_read+0xb2/0x364 [<00000000001a9980>] proc_reg_read+0x68/0x98 [<00000000001705ee>] vfs_read+0x6e/0xe8 [<0000000000170732>] sys_read+0x36/0x78 [<000000000010f750>] sysc_do_restart+0x12/0x16 [<0000000077f3ad6a>] 0x77f3ad6a <4>---[ end trace 1436ea9559d3de9e ]--- Reported-by: Mike Frysinger Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/cputime.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 24b1244aadb9..f23961ada7fb 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -78,7 +78,7 @@ cputime64_to_jiffies64(cputime64_t cputime) static inline unsigned int cputime_to_msecs(const cputime_t cputime) { - return __div(cputime, 4096000); + return cputime_div(cputime, 4096000); } static inline cputime_t @@ -160,7 +160,7 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value) static inline clock_t cputime_to_clock_t(cputime_t cputime) { - return __div(cputime, 4096000000ULL / USER_HZ); + return cputime_div(cputime, 4096000000ULL / USER_HZ); } static inline cputime_t @@ -175,7 +175,7 @@ clock_t_to_cputime(unsigned long x) static inline clock_t cputime64_to_clock_t(cputime64_t cputime) { - return __div(cputime, 4096000000ULL / USER_HZ); + return cputime_div(cputime, 4096000000ULL / USER_HZ); } struct s390_idle_data { -- cgit v1.2.1 From f8501ba77d69c88a65e4ebbe03bdc65b1edb0b86 Mon Sep 17 00:00:00 2001 From: 
Heiko Carstens Date: Thu, 29 Oct 2009 15:04:13 +0100 Subject: [S390] smp: fix sigp stop handling According to the architecture a cpu must not necessarily enter stopped state after completion of a sigp instruction with "stop" order code. So remove the BUG() statement after self sending sigp stop to avoid that it ever gets reached. Also add a sigp busy check to make sure that the order gets delivered. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/ipl.c | 7 +++---- arch/s390/kernel/smp.c | 4 ++-- arch/s390/kernel/swsusp_asm64.S | 1 + 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index ee57a42e6e93..4890ac6d7faa 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -1595,10 +1595,9 @@ static void stop_run(struct shutdown_trigger *trigger) { if (strcmp(trigger->name, ON_PANIC_STR) == 0) disabled_wait((unsigned long) __builtin_return_address(0)); - else { - signal_processor(smp_processor_id(), sigp_stop); - for (;;); - } + while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) + cpu_relax(); + for (;;); } static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR, diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index c699ac538c49..c99c45b848e3 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -647,8 +647,8 @@ void __cpu_die(unsigned int cpu) void cpu_die(void) { idle_task_exit(); - signal_processor(smp_processor_id(), sigp_stop); - BUG(); + while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) + cpu_relax(); for (;;); } diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 7c8653e27db6..0f4ef3b856d9 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S @@ -199,6 +199,7 @@ pgm_check_entry: brc 2,4b /* busy, try again */ 5: sigp %r9,%r2,__SIGP_STOP /* stop resume (current) CPU */ + brc 2,5b /* busy, try again */ 6: j 6b restart_suspend: -- cgit v1.2.1 From b3dcf3de8e4d71d79235fc67b6c5def6506c27f8 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 29 Oct 2009 15:04:14 +0100 Subject: [S390] smp: fix sigp sense handling sigp sense only returns the status of a cpu if it is non zero. If the status of the sensed cpu is all zeros condition code 0 (accpeted) is set and no status bits are returned. The current code however assumes that a status was returned and tests bits in it. This means uninitalized data is accessed with random results. Worst case is that the code that checks if cpu is offline on cpu hotplug assumes that the target cpu is offline while it is still running. This leads potentially to memory corruption since resources that are still needed by the target cpu will be freed and could be resused while still in use. 
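
As an illustration of the corrected logic, a hedged sketch (not the in-tree code; sigp_sense_call() stands in for signal_processor_ps(), and 0x50 is the stopped/check-stop mask already used in smp.c): the status word may only be examined when the condition code says a status was actually stored; condition code "accepted" means the cpu is running and stored nothing.

/* Hedged sketch only: names mirror, but are not, the smp.c/sigp.h code. */
enum sigp_cc {
	sigp_order_code_accepted = 0,	/* cpu running, no status stored */
	sigp_status_stored	 = 1,	/* *status is valid */
	sigp_busy		 = 2,
	sigp_not_operational	 = 3,
};

static int cpu_stopped_sketch(enum sigp_cc (*sigp_sense_call)(unsigned int *status,
							       int cpu),
			      int cpu)
{
	unsigned int status = 0;

	switch (sigp_sense_call(&status, cpu)) {
	case sigp_status_stored:
		/* Only now may the status bits be tested. */
		return (status & 0x50) != 0;
	default:
		/* "accepted" (or busy/not operational): status is undefined,
		 * so the cpu must be treated as not stopped. */
		return 0;
	}
}
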
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/smp.c | 1 - arch/s390/kernel/swsusp_asm64.S | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index c99c45b848e3..93e52039321b 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -76,7 +76,6 @@ static int cpu_stopped(int cpu) __u32 status; switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { - case sigp_order_code_accepted: case sigp_status_stored: /* Check for stopped and check stop state */ if (status & 0x50) diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 0f4ef3b856d9..0c26cc1898ec 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S @@ -207,6 +207,7 @@ restart_suspend: llgh %r2,0(%r1) 7: sigp %r9,%r2,__SIGP_SENSE /* Wait for resume CPU */ + brc 8,7b /* accepted, status 0, still running */ brc 2,7b /* busy, try again */ tmll %r9,0x40 /* Test if resume CPU is stopped */ jz 7b -- cgit v1.2.1 From 3c5d92a0cfb5103c0d5ab74d4ae6373d3af38148 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 29 Sep 2009 14:25:16 +0200 Subject: nohz: Introduce arch_needs_cpu Allow the architecture to request a normal jiffy tick when the system goes idle and tick_nohz_stop_sched_tick is called . On s390 the hook is used to prevent the system going fully idle if there has been an interrupt other than a clock comparator interrupt since the last wakeup. On s390 the HiperSockets response time for 1 connection ping-pong goes down from 42 to 34 microseconds. The CPU cost decreases by 27%. Signed-off-by: Martin Schwidefsky LKML-Reference: <20090929122533.402715150@de.ibm.com> Signed-off-by: Thomas Gleixner --- arch/s390/include/asm/cputime.h | 8 ++++++++ arch/s390/kernel/s390_ext.c | 2 ++ arch/s390/kernel/vtime.c | 2 ++ 3 files changed, 12 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 24b1244aadb9..95f3561517c7 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -183,6 +183,7 @@ struct s390_idle_data { unsigned long long idle_count; unsigned long long idle_enter; unsigned long long idle_time; + int nohz_delay; }; DECLARE_PER_CPU(struct s390_idle_data, s390_idle); @@ -198,4 +199,11 @@ static inline void s390_idle_check(void) vtime_start_cpu(); } +static inline int s390_nohz_delay(int cpu) +{ + return per_cpu(s390_idle, cpu).nohz_delay != 0; +} + +#define arch_needs_cpu(cpu) s390_nohz_delay(cpu) + #endif /* _S390_CPUTIME_H */ diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c index 0de305b598ce..59618bcd99b7 100644 --- a/arch/s390/kernel/s390_ext.c +++ b/arch/s390/kernel/s390_ext.c @@ -126,6 +126,8 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned short code) /* Serve timer interrupts first. */ clock_comparator_work(); kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; + if (code != 0x1004) + __get_cpu_var(s390_idle).nohz_delay = 1; index = ext_hash(code); for (p = ext_int_hash[index]; p; p = p->next) { if (likely(p->code == code)) diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index c41bb0d416e1..b59a812a010e 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -167,6 +167,8 @@ void vtime_stop_cpu(void) /* Wait for external, I/O or machine check interrupt. 
*/ psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT; + idle->nohz_delay = 0; + /* Check if the CPU timer needs to be reprogrammed. */ if (vq->do_spt) { __u64 vmax = VTIMER_MAX_SLICE; -- cgit v1.2.1 From 4ddf61ea99c1bbed4dcd889803c73213e1b3256b Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 3 Apr 2009 00:52:35 -0700 Subject: sysctl: s390 Use the compat_sys_sysctl Now that we have a generic 32bit compatibility implementation there is no need for s390 to implement it's own. Cc: Heiko Carstens Acked-by: Martin Schwidefsky Acked-by: Arnd Bergmann Acked-by: Christian Borntraeger Signed-off-by: Eric W. Biederman --- arch/s390/kernel/compat_linux.c | 53 --------------------------------------- arch/s390/kernel/compat_linux.h | 2 -- arch/s390/kernel/compat_wrapper.S | 6 ++--- 3 files changed, 2 insertions(+), 59 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 0debcec23a39..fda1a8123f9b 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -527,59 +527,6 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd, return ret; } -#ifdef CONFIG_SYSCTL_SYSCALL -struct __sysctl_args32 { - u32 name; - int nlen; - u32 oldval; - u32 oldlenp; - u32 newval; - u32 newlen; - u32 __unused[4]; -}; - -asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args) -{ - struct __sysctl_args32 tmp; - int error; - size_t oldlen; - size_t __user *oldlenp = NULL; - unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7; - - if (copy_from_user(&tmp, args, sizeof(tmp))) - return -EFAULT; - - if (tmp.oldval && tmp.oldlenp) { - /* Duh, this is ugly and might not work if sysctl_args - is in read-only memory, but do_sysctl does indirectly - a lot of uaccess in both directions and we'd have to - basically copy the whole sysctl.c here, and - glibc's __sysctl uses rw memory for the structure - anyway. 
*/ - if (get_user(oldlen, (u32 __user *)compat_ptr(tmp.oldlenp)) || - put_user(oldlen, (size_t __user *)addr)) - return -EFAULT; - oldlenp = (size_t __user *)addr; - } - - lock_kernel(); - error = do_sysctl(compat_ptr(tmp.name), tmp.nlen, compat_ptr(tmp.oldval), - oldlenp, compat_ptr(tmp.newval), tmp.newlen); - unlock_kernel(); - if (oldlenp) { - if (!error) { - if (get_user(oldlen, (size_t __user *)addr) || - put_user(oldlen, (u32 __user *)compat_ptr(tmp.oldlenp))) - error = -EFAULT; - } - if (copy_to_user(args->__unused, tmp.__unused, - sizeof(tmp.__unused))) - error = -EFAULT; - } - return error; -} -#endif - struct stat64_emu31 { unsigned long long st_dev; unsigned int __pad1; diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h index c07f9ca05ade..45e9092b3aad 100644 --- a/arch/s390/kernel/compat_linux.h +++ b/arch/s390/kernel/compat_linux.h @@ -162,7 +162,6 @@ struct ucontext32 { compat_sigset_t uc_sigmask; /* mask last for extensibility */ }; -struct __sysctl_args32; struct stat64_emu31; struct mmap_arg_struct_emu31; struct fadvise64_64_args; @@ -212,7 +211,6 @@ long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, size_t count); long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count); -long sys32_sysctl(struct __sysctl_args32 __user *args); long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf); long sys32_lstat64(char __user * filename, struct stat64_emu31 __user * statbuf); diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index cbd9901dc0f8..30de2d0e52bb 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -689,8 +689,6 @@ sys32_fdatasync_wrapper: llgfr %r2,%r2 # unsigned int jg sys_fdatasync # branch to system call -#sys32_sysctl_wrapper # tbd - .globl sys32_mlock_wrapper sys32_mlock_wrapper: llgfr %r2,%r2 # unsigned long @@ -1087,8 +1085,8 @@ sys32_stime_wrapper: .globl sys32_sysctl_wrapper sys32_sysctl_wrapper: - llgtr %r2,%r2 # struct __sysctl_args32 * - jg sys32_sysctl + llgtr %r2,%r2 # struct compat_sysctl_args * + jg compat_sys_sysctl .globl sys32_fstat64_wrapper sys32_fstat64_wrapper: -- cgit v1.2.1 From e576b9ef41c2c1b13e0c123b6e9d0589723c68bf Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 10 Nov 2009 07:54:52 +0000 Subject: s390: use RCU to walk list of network devices This is similar to other cases where for_each_netdev_rcu can be used when gathering information. By inspection, don't have platform or cross-build environment to validate. Signed-off-by: Stephen Hemminger Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- arch/s390/appldata/appldata_net_sum.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c index fa741f84c5b9..4ce7fa95880f 100644 --- a/arch/s390/appldata/appldata_net_sum.c +++ b/arch/s390/appldata/appldata_net_sum.c @@ -83,8 +83,9 @@ static void appldata_get_net_sum_data(void *data) rx_dropped = 0; tx_dropped = 0; collisions = 0; - read_lock(&dev_base_lock); - for_each_netdev(&init_net, dev) { + + rcu_read_lock(); + for_each_netdev_rcu(&init_net, dev) { const struct net_device_stats *stats = dev_get_stats(dev); rx_packets += stats->rx_packets; @@ -98,7 +99,8 @@ static void appldata_get_net_sum_data(void *data) collisions += stats->collisions; i++; } - read_unlock(&dev_base_lock); + rcu_read_unlock(); + net_data->nr_interfaces = i; net_data->rx_packets = rx_packets; net_data->tx_packets = tx_packets; -- cgit v1.2.1 From b05fd35d9146c184e1903a26b6516f1660ca230f Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 3 Apr 2009 04:36:34 -0700 Subject: sysctl s390: Remove dead sysctl binary support Now that sys_sysctl is a generic wrapper around /proc/sys .ctl_name and .strategy members of sysctl tables are dead code. Remove them. Cc: Martin Schwidefsky Cc: Heiko Carstens Signed-off-by: Eric W. Biederman --- arch/s390/kernel/debug.c | 9 ++------- arch/s390/mm/cmm.c | 5 ++--- 2 files changed, 4 insertions(+), 10 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 20f282c911c2..adf11260260a 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -893,35 +893,30 @@ s390dbf_procactive(ctl_table *table, int write, static struct ctl_table s390dbf_table[] = { { - .ctl_name = CTL_S390DBF_STOPPABLE, .procname = "debug_stoppable", .data = &debug_stoppable, .maxlen = sizeof(int), .mode = S_IRUGO | S_IWUSR, .proc_handler = &proc_dointvec, - .strategy = &sysctl_intvec, }, { - .ctl_name = CTL_S390DBF_ACTIVE, .procname = "debug_active", .data = &debug_active, .maxlen = sizeof(int), .mode = S_IRUGO | S_IWUSR, .proc_handler = &s390dbf_procactive, - .strategy = &sysctl_intvec, }, - { .ctl_name = 0 } + { } }; static struct ctl_table s390dbf_dir_table[] = { { - .ctl_name = CTL_S390DBF, .procname = "s390dbf", .maxlen = 0, .mode = S_IRUGO | S_IXUGO, .child = s390dbf_table, }, - { .ctl_name = 0 } + { } }; static struct ctl_table_header *s390dbf_sysctl_header; diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index b201135cc18c..dab3e4a7582e 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -355,18 +355,17 @@ static struct ctl_table cmm_table[] = { .mode = 0644, .proc_handler = &cmm_timeout_handler, }, - { .ctl_name = 0 } + { } }; static struct ctl_table cmm_dir_table[] = { { - .ctl_name = CTL_VM, .procname = "vm", .maxlen = 0, .mode = 0555, .child = cmm_table, }, - { .ctl_name = 0 } + { } }; #endif -- cgit v1.2.1 From b89031e087a47819be48028e62cebade5f9fb75b Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 13 Nov 2009 15:43:52 +0100 Subject: [S390] reset cputime accounting after IPL from NSS After an IPL from NSS the uptime of the system is incorrect. The reason is that the startup code in head.S is not executed in case of an IPL from NSS. Due to that sched_clock_base_cc which is used to initialze wall_to_monotonic contains the time stamp when the NSS has been created instead of the time stamp of the system start. 
Reinitialize the cputime accounting values in create_kernel_nss after the SAVESYS CP command that created the NSS segment. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/early.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index bf8b4ae7ff2d..e49e9e0c69fd 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -55,6 +55,7 @@ static void __init reset_tod_clock(void) disabled_wait(0); sched_clock_base_cc = TOD_UNIX_EPOCH; + S390_lowcore.last_update_clock = sched_clock_base_cc; } #ifdef CONFIG_SHARED_KERNEL @@ -167,6 +168,14 @@ static noinline __init void create_kernel_nss(void) return; } + /* re-initialize cputime accounting. */ + sched_clock_base_cc = get_clock(); + S390_lowcore.last_update_clock = sched_clock_base_cc; + S390_lowcore.last_update_timer = 0x7fffffffffffffffULL; + S390_lowcore.user_timer = 0; + S390_lowcore.system_timer = 0; + asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer)); + /* re-setup boot command line with new ipl vm parms */ ipl_update_parameters(); setup_boot_command_line(); -- cgit v1.2.1 From bcc6525fb23d2cec7ffdf908d98826a66823bcb2 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 13 Nov 2009 15:43:54 +0100 Subject: [S390] s390: fix single stepping on svc0 On s390 there are two ways of specifying the system call number for the svc instruction. The standard way is to use the immediate field in the instruction (or to use EXecute for values unknown during assemble time). This can encode 256 system calls. The kernel ABI also allows to put the system call number in r1 and then execute svc 0 to enable system call numbers > 255. It turns out that single stepping svc 0 is broken, since the PER program check handler uses r1. We have to use a different register. 
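
To make the two calling conventions concrete, a small hedged user-space sketch follows (assumptions: the usual s390 Linux syscall ABI with the number in %r1 for svc 0 and the result returned in %r2; __NR_getpid is used purely as an example and this is not kernel code):

#include <unistd.h>
#include <sys/syscall.h>

/* Standard form: the system call number is encoded in the instruction. */
static long getpid_svc_immediate(void)
{
	register long r2 asm("2");

	asm volatile("svc %1"
		     : "=d" (r2)
		     : "i" (__NR_getpid)	/* limited to numbers 0..255 */
		     : "memory", "cc");
	return r2;
}

/* svc 0 form: the number is passed in %r1, so numbers > 255 work too. */
static long getpid_svc0(void)
{
	register unsigned long r1 asm("1") = __NR_getpid;
	register long r2 asm("2");

	asm volatile("svc 0"
		     : "=d" (r2)
		     : "d" (r1)
		     : "memory", "cc");
	return r2;
}

It is the second form that the PER handler has to cope with: %r1 still carries the user-visible system call number when the program check is delivered, which is why the handler must scratch a different register.
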
Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/entry.S | 8 ++++---- arch/s390/kernel/entry64.S | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index f43d2ee54464..48215d15762b 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -565,10 +565,10 @@ pgm_svcper: lh %r7,0x8a # get svc number from lowcore l %r9,__LC_THREAD_INFO # load pointer to thread_info struct TRACE_IRQS_OFF - l %r1,__TI_task(%r9) - mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID - mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS - mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID + l %r8,__TI_task(%r9) + mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID + mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS + mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP TRACE_IRQS_ON stosm __SF_EMPTY(%r15),0x03 # reenable interrupts diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index a6f7b20df616..9aff1d449b6e 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -543,10 +543,10 @@ pgm_svcper: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct - lg %r1,__TI_task(%r9) - mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID - mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS - mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID + lg %r8,__TI_task(%r9) + mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID + mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS + mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP TRACE_IRQS_ON stosm __SF_EMPTY(%r15),0x03 # reenable interrupts -- cgit v1.2.1 From 6beb000923882f6204ea2cfcd932e568e900803f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 9 Nov 2009 15:21:34 +0000 Subject: locking: Make inlining decision Kconfig based commit 892a7c67 (locking: Allow arch-inlined spinlocks) implements the selection of which lock functions are inlined based on defines in arch/.../spinlock.h: #define __always_inline__LOCK_FUNCTION Despite of the name __always_inline__* the lock functions can be built out of line depending on config options. Also if the arch does not set some inline defines the generic code might set them; again depending on config options. This makes it unnecessary hard to figure out when and which lock functions are inlined. Aside of that it makes it way harder and messier for -rt to manipulate the lock functions. Convert the inlining decision to CONFIG switches. Each lock function is inlined depending on CONFIG_INLINE_*. The configs implement the existing dependencies. The architecture code can select ARCH_INLINE_* to signal that it wants the corresponding lock function inlined. ARCH_INLINE_* is necessary as Kconfig ignores "depends on" restrictions when a config element is selected. No functional change. 
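
A minimal sketch of the resulting pattern (simplified and hedged, not the exact spinlock_api_smp.h/spinlock.c code; the "my_" names are placeholders): the generic layer provides one out-of-line definition unless the corresponding CONFIG_INLINE_* switch, driven by the architecture's ARCH_INLINE_* select, turns the wrapper into an inline.

/* Hedged sketch of the CONFIG_INLINE_* pattern, not the real spinlock API. */
struct my_lock { volatile unsigned int lock; };

void __my_spin_lock(struct my_lock *lock);	/* arch-provided fast path */

#ifdef CONFIG_INLINE_SPIN_LOCK
/* The arch selected ARCH_INLINE_SPIN_LOCK, so Kconfig set this switch
 * and the wrapper collapses into the caller. */
static inline void my_spin_lock(struct my_lock *lock)
{
	__my_spin_lock(lock);
}
#else
/* Default: one shared out-of-line definition built into the kernel. */
void my_spin_lock(struct my_lock *lock);
#endif
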
Signed-off-by: Thomas Gleixner LKML-Reference: <20091109151428.504477141@linutronix.de> Acked-by: Heiko Carstens Reviewed-by: Ingo Molnar Acked-by: Peter Zijlstra --- arch/s390/Kconfig | 28 ++++++++++++++++++++++++++++ arch/s390/include/asm/spinlock.h | 29 ----------------------------- 2 files changed, 28 insertions(+), 29 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 43c0acad7160..16c673096a22 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -95,6 +95,34 @@ config S390 select HAVE_ARCH_TRACEHOOK select INIT_ALL_POSSIBLE select HAVE_PERF_EVENTS + select ARCH_INLINE_SPIN_TRYLOCK + select ARCH_INLINE_SPIN_TRYLOCK_BH + select ARCH_INLINE_SPIN_LOCK + select ARCH_INLINE_SPIN_LOCK_BH + select ARCH_INLINE_SPIN_LOCK_IRQ + select ARCH_INLINE_SPIN_LOCK_IRQSAVE + select ARCH_INLINE_SPIN_UNLOCK + select ARCH_INLINE_SPIN_UNLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK_IRQ + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE + select ARCH_INLINE_READ_TRYLOCK + select ARCH_INLINE_READ_LOCK + select ARCH_INLINE_READ_LOCK_BH + select ARCH_INLINE_READ_LOCK_IRQ + select ARCH_INLINE_READ_LOCK_IRQSAVE + select ARCH_INLINE_READ_UNLOCK + select ARCH_INLINE_READ_UNLOCK_BH + select ARCH_INLINE_READ_UNLOCK_IRQ + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE + select ARCH_INLINE_WRITE_TRYLOCK + select ARCH_INLINE_WRITE_LOCK + select ARCH_INLINE_WRITE_LOCK_BH + select ARCH_INLINE_WRITE_LOCK_IRQ + select ARCH_INLINE_WRITE_LOCK_IRQSAVE + select ARCH_INLINE_WRITE_UNLOCK + select ARCH_INLINE_WRITE_UNLOCK_BH + select ARCH_INLINE_WRITE_UNLOCK_IRQ + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE config SCHED_OMIT_FRAME_POINTER bool diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 41ce6861174e..c9af0d19c7ab 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -191,33 +191,4 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) #define _raw_read_relax(lock) cpu_relax() #define _raw_write_relax(lock) cpu_relax() -#define __always_inline__spin_lock -#define __always_inline__read_lock -#define __always_inline__write_lock -#define __always_inline__spin_lock_bh -#define __always_inline__read_lock_bh -#define __always_inline__write_lock_bh -#define __always_inline__spin_lock_irq -#define __always_inline__read_lock_irq -#define __always_inline__write_lock_irq -#define __always_inline__spin_lock_irqsave -#define __always_inline__read_lock_irqsave -#define __always_inline__write_lock_irqsave -#define __always_inline__spin_trylock -#define __always_inline__read_trylock -#define __always_inline__write_trylock -#define __always_inline__spin_trylock_bh -#define __always_inline__spin_unlock -#define __always_inline__read_unlock -#define __always_inline__write_unlock -#define __always_inline__spin_unlock_bh -#define __always_inline__read_unlock_bh -#define __always_inline__write_unlock_bh -#define __always_inline__spin_unlock_irq -#define __always_inline__read_unlock_irq -#define __always_inline__write_unlock_irq -#define __always_inline__spin_unlock_irqrestore -#define __always_inline__read_unlock_irqrestore -#define __always_inline__write_unlock_irqrestore - #endif /* __ASM_SPINLOCK_H */ -- cgit v1.2.1 From 0696b711e4be45fa104c12329f617beb29c03f78 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Tue, 17 Nov 2009 13:49:50 +0800 Subject: timekeeping: Fix clock_gettime vsyscall time warp Since commit 0a544198 "timekeeping: Move NTP adjusted clock multiplier to struct timekeeper" the clock multiplier of vsyscall is updated with the 
unmodified clock multiplier of the clock source and not with the NTP adjusted multiplier of the timekeeper. This causes user space observerable time warps: new CLOCK-warp maximum: 120 nsecs, 00000025c337c537 -> 00000025c337c4bf Add a new argument "mult" to update_vsyscall() and hand in the timekeeping internal NTP adjusted multiplier. Signed-off-by: Lin Ming Cc: "Zhang Yanmin" Cc: Martin Schwidefsky Cc: Benjamin Herrenschmidt Cc: Tony Luck LKML-Reference: <1258436990.17765.83.camel@minggr.sh.intel.com> Signed-off-by: Thomas Gleixner --- arch/s390/kernel/time.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 34162a0b2caa..68e1ecf5ebab 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -214,7 +214,8 @@ struct clocksource * __init clocksource_default_clock(void) return &clocksource_tod; } -void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) +void update_vsyscall(struct timespec *wall_time, struct clocksource *clock, + u32 mult) { if (clock != &clocksource_tod) return; -- cgit v1.2.1 From 6d4561110a3e9fa742aeec6717248a491dfb1878 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 16 Nov 2009 03:11:48 -0800 Subject: sysctl: Drop & in front of every proc_handler. For consistency drop & in front of every proc_handler. Explicity taking the address is unnecessary and it prevents optimizations like stubbing the proc_handlers to NULL. Cc: Alexey Dobriyan Cc: Ingo Molnar Cc: Joe Perches Signed-off-by: Eric W. Biederman --- arch/s390/appldata/appldata_base.c | 4 ++-- arch/s390/kernel/debug.c | 4 ++-- arch/s390/mm/cmm.c | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index b55fd7ed1c31..495589950dc7 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -61,12 +61,12 @@ static struct ctl_table appldata_table[] = { { .procname = "timer", .mode = S_IRUGO | S_IWUSR, - .proc_handler = &appldata_timer_handler, + .proc_handler = appldata_timer_handler, }, { .procname = "interval", .mode = S_IRUGO | S_IWUSR, - .proc_handler = &appldata_interval_handler, + .proc_handler = appldata_interval_handler, }, { }, }; diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index adf11260260a..071c81f179ef 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -897,14 +897,14 @@ static struct ctl_table s390dbf_table[] = { .data = &debug_stoppable, .maxlen = sizeof(int), .mode = S_IRUGO | S_IWUSR, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "debug_active", .data = &debug_active, .maxlen = sizeof(int), .mode = S_IRUGO | S_IWUSR, - .proc_handler = &s390dbf_procactive, + .proc_handler = s390dbf_procactive, }, { } }; diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index dab3e4a7582e..ff58779bf7e9 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -343,17 +343,17 @@ static struct ctl_table cmm_table[] = { { .procname = "cmm_pages", .mode = 0644, - .proc_handler = &cmm_pages_handler, + .proc_handler = cmm_pages_handler, }, { .procname = "cmm_timed_pages", .mode = 0644, - .proc_handler = &cmm_pages_handler, + .proc_handler = cmm_pages_handler, }, { .procname = "cmm_timeout", .mode = 0644, - .proc_handler = &cmm_timeout_handler, + .proc_handler = cmm_timeout_handler, }, { } }; -- cgit v1.2.1 From 2d4dc890b5c8fabd818a8586607e6843c4375e62 Mon Sep 
17 00:00:00 2001 From: Ilya Loginov Date: Thu, 26 Nov 2009 09:16:19 +0100 Subject: block: add helpers to run flush_dcache_page() against a bio and a request's pages Mtdblock driver doesn't call flush_dcache_page for pages in request. So, this causes problems on architectures where the icache doesn't fill from the dcache or with dcache aliases. The patch fixes this. The ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE symbol was introduced to avoid pointless empty cache-thrashing loops on architectures for which flush_dcache_page() is a no-op. Every architecture was provided with this flush pages on architectires where ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is equal 1 or do nothing otherwise. See "fix mtd_blkdevs problem with caches on some architectures" discussion on LKML for more information. Signed-off-by: Ilya Loginov Cc: Ingo Molnar Cc: David Woodhouse Cc: Peter Horton Cc: "Ed L. Cashin" Signed-off-by: Jens Axboe --- arch/s390/include/asm/cacheflush.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h index 49d5af916d01..405cc97c6249 100644 --- a/arch/s390/include/asm/cacheflush.h +++ b/arch/s390/include/asm/cacheflush.h @@ -10,6 +10,7 @@ #define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) -- cgit v1.2.1 From 367e1319b229110a27c53221c2fa32a6aa86d4a9 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 26 Aug 2009 14:57:07 +0300 Subject: KVM: Return -ENOTTY on unrecognized ioctls Not the incorrect -EINVAL. Signed-off-by: Avi Kivity --- arch/s390/kvm/kvm-s390.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 07ced89740d7..00e2ce8e91f5 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -150,7 +150,7 @@ long kvm_arch_vm_ioctl(struct file *filp, break; } default: - r = -EINVAL; + r = -ENOTTY; } return r; -- cgit v1.2.1 From 10474ae8945ce08622fd1f3464e55bd817bf2376 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 15 Sep 2009 11:37:46 +0200 Subject: KVM: Activate Virtualization On Demand X86 CPUs need to have some magic happening to enable the virtualization extensions on them. This magic can result in unpleasant results for users, like blocking other VMMs from working (vmx) or using invalid TLB entries (svm). Currently KVM activates virtualization when the respective kernel module is loaded. This blocks us from autoloading KVM modules without breaking other VMMs. To circumvent this problem at least a bit, this patch introduces on demand activation of virtualization. This means, that instead virtualization is enabled on creation of the first virtual machine and disabled on destruction of the last one. So using this, KVM can be easily autoloaded, while keeping other hypervisors usable. 
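
A hedged sketch of the on-demand scheme described above (simplified; vm_lock, vm_usage_count and the arch_hardware_enable/disable() hooks are placeholders, not the kvm_main.c API, and the real code additionally has to run the enable on every online CPU): the virtualization extensions are switched on when the first VM is created and switched off again when the last VM is destroyed.

#include <linux/mutex.h>

/* Hedged sketch only -- all names below are stand-ins. */
static DEFINE_MUTEX(vm_lock);
static int vm_usage_count;

int arch_hardware_enable(void);		/* e.g. VMXON / EFER.SVME; no-op on s390 */
void arch_hardware_disable(void);

static int hardware_enable_all_sketch(void)
{
	int r = 0;

	mutex_lock(&vm_lock);
	if (vm_usage_count++ == 0)
		r = arch_hardware_enable();
	if (r)
		vm_usage_count--;	/* enable failed, roll the count back */
	mutex_unlock(&vm_lock);
	return r;
}

static void hardware_disable_all_sketch(void)
{
	mutex_lock(&vm_lock);
	if (--vm_usage_count == 0)
		arch_hardware_disable();
	mutex_unlock(&vm_lock);
}
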
Signed-off-by: Alexander Graf Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/s390/kvm/kvm-s390.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 00e2ce8e91f5..544505893c9f 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -74,9 +74,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { static unsigned long long *facilities; /* Section: not file related */ -void kvm_arch_hardware_enable(void *garbage) +int kvm_arch_hardware_enable(void *garbage) { /* every s390 is virtualization enabled ;-) */ + return 0; } void kvm_arch_hardware_disable(void *garbage) -- cgit v1.2.1 From d7b0b5eb3000c6fb902f08c619fcd673a23d8fab Mon Sep 17 00:00:00 2001 From: Carsten Otte Date: Thu, 19 Nov 2009 14:21:16 +0100 Subject: KVM: s390: Make psw available on all exits, not just a subset This patch moves s390 processor status word into the base kvm_run struct and keeps it up-to date on all userspace exits. The userspace ABI is broken by this, however there are no applications in the wild using this. A capability check is provided so users can verify the updated API exists. Cc: stable@kernel.org Signed-off-by: Carsten Otte Signed-off-by: Avi Kivity --- arch/s390/include/asm/kvm.h | 3 ++- arch/s390/kvm/kvm-s390.c | 25 +++++++++++++++++-------- 2 files changed, 19 insertions(+), 9 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h index 3dfcaeb5d7f4..82b32a100c7d 100644 --- a/arch/s390/include/asm/kvm.h +++ b/arch/s390/include/asm/kvm.h @@ -1,6 +1,5 @@ #ifndef __LINUX_KVM_S390_H #define __LINUX_KVM_S390_H - /* * asm-s390/kvm.h - KVM s390 specific structures and definitions * @@ -15,6 +14,8 @@ */ #include +#define __KVM_S390 + /* for KVM_GET_REGS and KVM_SET_REGS */ struct kvm_regs { /* general purpose regs for s390 */ diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 544505893c9f..f8bcaefd7d34 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -117,10 +117,16 @@ long kvm_arch_dev_ioctl(struct file *filp, int kvm_dev_ioctl_check_extension(long ext) { + int r; + switch (ext) { + case KVM_CAP_S390_PSW: + r = 1; + break; default: - return 0; + r = 0; } + return r; } /* Section: vm related */ @@ -420,8 +426,10 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) vcpu_load(vcpu); if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING) rc = -EBUSY; - else - vcpu->arch.sie_block->gpsw = psw; + else { + vcpu->run->psw_mask = psw.mask; + vcpu->run->psw_addr = psw.addr; + } vcpu_put(vcpu); return rc; } @@ -509,9 +517,6 @@ rerun_vcpu: switch (kvm_run->exit_reason) { case KVM_EXIT_S390_SIEIC: - vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask; - vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr; - break; case KVM_EXIT_UNKNOWN: case KVM_EXIT_INTR: case KVM_EXIT_S390_RESET: @@ -520,6 +525,9 @@ rerun_vcpu: BUG(); } + vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; + vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; + might_fault(); do { @@ -539,8 +547,6 @@ rerun_vcpu: /* intercept cannot be handled in-kernel, prepare kvm-run */ kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; - kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask; - kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr; kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; 
kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; rc = 0; @@ -552,6 +558,9 @@ rerun_vcpu: rc = 0; } + kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; + kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; + if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); -- cgit v1.2.1 From f50146bd7bdb75435638e60d4960edd9bcdf88b8 Mon Sep 17 00:00:00 2001 From: Carsten Otte Date: Mon, 30 Nov 2009 17:14:41 +0100 Subject: KVM: s390: Fix prefix register checking in arch/s390/kvm/sigp.c This patch corrects the checking of the new address for the prefix register. On s390, the prefix register is used to address the cpu's lowcore (address 0...8k). This check is supposed to verify that the memory is readable and present. copy_from_guest is a helper function, that can be used to read from guest memory. It applies prefixing, adds the start address of the guest memory in user, and then calls copy_from_user. Previous code was obviously broken for two reasons: - prefixing should not be applied here. The current prefix register is going to be updated soon, and the address we're looking for will be 0..8k after we've updated the register - we're adding the guest origin (gmsor) twice: once in subject code and once in copy_from_guest With kuli, we did not hit this problem because (a) we were lucky with previous prefix register content, and (b) our guest memory was mmaped very low into user address space. Cc: stable@kernel.org Signed-off-by: Carsten Otte Reported-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/s390/kvm/sigp.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 40c8c6748cfe..15ee1111de58 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -188,9 +188,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, /* make sure that the new value is valid memory */ address = address & 0x7fffe000u; - if ((copy_from_guest(vcpu, &tmp, - (u64) (address + vcpu->arch.sie_block->gmsor) , 1)) || - (copy_from_guest(vcpu, &tmp, (u64) (address + + if ((copy_from_user(&tmp, (void __user *) + (address + vcpu->arch.sie_block->gmsor) , 1)) || + (copy_from_user(&tmp, (void __user *)(address + vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) { *reg |= SIGP_STAT_INVALID_PARAMETER; return 1; /* invalid parameter */ -- cgit v1.2.1 From af901ca181d92aac3a7dc265144a9081a86d8f39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Goddard=20Rosa?= Date: Sat, 14 Nov 2009 13:09:05 -0200 Subject: tree-wide: fix assorted typos all over the place MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That is "success", "unknown", "through", "performance", "[re|un]mapping" , "access", "default", "reasonable", "[con]currently", "temperature" , "channel", "[un]used", "application", "example","hierarchy", "therefore" , "[over|under]flow", "contiguous", "threshold", "enough" and others. 
Signed-off-by: AndrĂ© Goddard Rosa Signed-off-by: Jiri Kosina --- arch/s390/math-emu/math.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c index 3ee78ccb617d..cd4e9c168dd7 100644 --- a/arch/s390/math-emu/math.c +++ b/arch/s390/math-emu/math.c @@ -2088,7 +2088,7 @@ int math_emu_ldr(__u8 *opcode) { __u16 opc = *((__u16 *) opcode); if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ - /* we got an exception therfore ry can't be in {0,2,4,6} */ + /* we got an exception therefore ry can't be in {0,2,4,6} */ asm volatile( /* load rx from fp_regs.fprs[ry] */ " bras 1,0f\n" " ld 0,0(%1)\n" @@ -2118,7 +2118,7 @@ int math_emu_ler(__u8 *opcode) { __u16 opc = *((__u16 *) opcode); if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ - /* we got an exception therfore ry can't be in {0,2,4,6} */ + /* we got an exception therefore ry can't be in {0,2,4,6} */ asm volatile( /* load rx from fp_regs.fprs[ry] */ " bras 1,0f\n" " le 0,0(%1)\n" -- cgit v1.2.1 From 5506e68975c346e55f786b554e28e368cdede444 Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 4 Dec 2009 17:44:53 -0800 Subject: s390: Convert BUG() to use unreachable() Use the new unreachable() macro instead of for(;;); Signed-off-by: David Daney Acked-by: Martin Schwidefsky CC: Heiko Carstens CC: linux390@de.ibm.com CC: linux-s390@vger.kernel.org Signed-off-by: Linus Torvalds --- arch/s390/include/asm/bug.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h index 7efd0abe8887..efb74fd5156e 100644 --- a/arch/s390/include/asm/bug.h +++ b/arch/s390/include/asm/bug.h @@ -49,7 +49,7 @@ #define BUG() do { \ __EMIT_BUG(0); \ - for (;;); \ + unreachable(); \ } while (0) #define WARN_ON(x) ({ \ -- cgit v1.2.1 From cf87b7439ec81b9374e7772e44e9cb2eb9e57160 Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Mon, 7 Dec 2009 12:44:42 +0100 Subject: [S390] s390: clear high-order bits of registers after sam64 When the kernel is IPLed without the CLEAR option and switches to 64-bit, the high-order half of the registers might contain random values. This can cause addressing exceptions and the kernel enters an interrupt loop. Initialize the high-order half of the general purpose registers with zeros after switching to 64-bit mode. 
Cc: Signed-off-by: Hendrik Brueckner Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/head64.S | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 6a250808092b..d984a2a380c3 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -83,6 +83,8 @@ startup_continue: slr %r0,%r0 # set cpuid to zero sigp %r1,%r0,0x12 # switch to esame mode sam64 # switch to 64 bit mode + llgfr %r13,%r13 # clear high-order half of base reg + lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area # move IPL device to lowcore @@ -127,6 +129,7 @@ startup_continue: .L4malign:.quad 0xffffffffffc00000 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 .Lnop: .long 0x07000700 +.Lzero64:.fill 16,4,0x0 #ifdef CONFIG_ZFCPDUMP .Lcurrent_cpu: .long 0x0 -- cgit v1.2.1 From 454e1fa1ebae7cff707b2e3f12b775c263c8408b Mon Sep 17 00:00:00 2001 From: Peter Oberparleiter Date: Mon, 7 Dec 2009 12:51:30 +0100 Subject: [S390] cio: split PGID settings and status Split setting (driver wants feature enabled) and status (feature setup was successful) for PGID related ccw device features so that setup errors can be detected. Previously, incorrectly handled setup errors could in rare cases lead to erratic I/O behavior and permanently unusuable devices. Signed-off-by: Peter Oberparleiter Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/ccwdev.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index 2a5419551176..f4bd346a52d3 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -142,6 +142,8 @@ struct ccw1; extern int ccw_device_set_options_mask(struct ccw_device *, unsigned long); extern int ccw_device_set_options(struct ccw_device *, unsigned long); extern void ccw_device_clear_options(struct ccw_device *, unsigned long); +int ccw_device_is_pathgroup(struct ccw_device *cdev); +int ccw_device_is_multipath(struct ccw_device *cdev); /* Allow for i/o completion notification after primary interrupt status. */ #define CCWDEV_EARLY_NOTIFICATION 0x0001 @@ -151,6 +153,8 @@ extern void ccw_device_clear_options(struct ccw_device *, unsigned long); #define CCWDEV_DO_PATHGROUP 0x0004 /* Allow forced onlining of boxed devices. */ #define CCWDEV_ALLOW_FORCE 0x0008 +/* Try to use multipath mode. */ +#define CCWDEV_DO_MULTIPATH 0x0010 extern int ccw_device_start(struct ccw_device *, struct ccw1 *, unsigned long, __u8, unsigned long); -- cgit v1.2.1 From 61365e132ef987f7719af5d2e434db4465957637 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 7 Dec 2009 12:51:42 +0100 Subject: [S390] Improve address space check. A data access in access-register mode always is a user mode access, the code to inspect the access-registers can be removed. The second change is to use a different test to check for no-execute fault. The third change is to pass the translation exception identification as parameter, in theory the trans_exc_code in the lowcore could have been overwritten by the time the call to check_space from do_no_context is done. 
Signed-off-by: Martin Schwidefsky --- arch/s390/mm/fault.c | 99 ++++++++++++++++++++++++---------------------------- 1 file changed, 45 insertions(+), 54 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 6d507462967a..3df5b918cfe2 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -100,39 +100,28 @@ void bust_spinlocks(int yes) /* * Returns the address space associated with the fault. - * Returns 0 for kernel space, 1 for user space and - * 2 for code execution in user space with noexec=on. + * Returns 0 for kernel space and 1 for user space. */ -static inline int check_space(struct task_struct *tsk) +static inline int user_space_fault(unsigned long trans_exc_code) { /* - * The lowest two bits of S390_lowcore.trans_exc_code - * indicate which paging table was used. + * The lowest two bits of the translation exception + * identification indicate which paging table was used. */ - int desc = S390_lowcore.trans_exc_code & 3; - - if (desc == 3) /* Home Segment Table Descriptor */ - return switch_amode == 0; - if (desc == 2) /* Secondary Segment Table Descriptor */ - return tsk->thread.mm_segment.ar4; -#ifdef CONFIG_S390_SWITCH_AMODE - if (unlikely(desc == 1)) { /* STD determined via access register */ - /* %a0 always indicates primary space. */ - if (S390_lowcore.exc_access_id != 0) { - save_access_regs(tsk->thread.acrs); - /* - * An alet of 0 indicates primary space. - * An alet of 1 indicates secondary space. - * Any other alet values generate an - * alen-translation exception. - */ - if (tsk->thread.acrs[S390_lowcore.exc_access_id]) - return tsk->thread.mm_segment.ar4; - } - } -#endif - /* Primary Segment Table Descriptor */ - return switch_amode << s390_noexec; + trans_exc_code &= 3; + if (trans_exc_code == 2) + /* Access via secondary space, set_fs setting decides */ + return current->thread.mm_segment.ar4; + if (!switch_amode) + /* User space if the access has been done via home space. */ + return trans_exc_code == 3; + /* + * If the user space is not the home space the kernel runs in home + * space. Access via secondary space has already been covered, + * access via primary space or access register is from user space + * and access via home space is from the kernel. + */ + return trans_exc_code != 3; } /* @@ -162,9 +151,10 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, } static void do_no_context(struct pt_regs *regs, unsigned long error_code, - unsigned long address) + unsigned long trans_exc_code) { const struct exception_table_entry *fixup; + unsigned long address; /* Are we prepared to handle this kernel fault? */ fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK); @@ -177,7 +167,8 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code, * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. 
*/ - if (check_space(current) == 0) + address = trans_exc_code & __FAIL_ADDR_MASK; + if (user_space_fault(trans_exc_code) == 0) printk(KERN_ALERT "Unable to handle kernel pointer dereference" " at virtual kernel address %p\n", (void *)address); else @@ -188,7 +179,8 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code, do_exit(SIGKILL); } -static void do_low_address(struct pt_regs *regs, unsigned long error_code) +static void do_low_address(struct pt_regs *regs, unsigned long error_code, + unsigned long trans_exc_code) { /* Low-address protection hit in kernel mode means NULL pointer write access in kernel mode. */ @@ -198,11 +190,11 @@ static void do_low_address(struct pt_regs *regs, unsigned long error_code) do_exit(SIGKILL); } - do_no_context(regs, error_code, 0); + do_no_context(regs, error_code, trans_exc_code); } static void do_sigbus(struct pt_regs *regs, unsigned long error_code, - unsigned long address) + unsigned long trans_exc_code) { struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; @@ -212,13 +204,13 @@ static void do_sigbus(struct pt_regs *regs, unsigned long error_code, * Send a sigbus, regardless of whether we were in kernel * or user mode. */ - tsk->thread.prot_addr = address; + tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK; tsk->thread.trap_no = error_code; force_sig(SIGBUS, tsk); /* Kernel mode? Handle exceptions or die */ if (!(regs->psw.mask & PSW_MASK_PSTATE)) - do_no_context(regs, error_code, address); + do_no_context(regs, error_code, trans_exc_code); } #ifdef CONFIG_S390_EXEC_PROTECT @@ -272,13 +264,13 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs, * 3b Region third trans. -> Not present (nullification) */ static inline void -do_exception(struct pt_regs *regs, unsigned long error_code, int write) +do_exception(struct pt_regs *regs, unsigned long error_code, int write, + unsigned long trans_exc_code) { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; unsigned long address; - int space; int si_code; int fault; @@ -288,18 +280,15 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write) tsk = current; mm = tsk->mm; - /* get the failing address and the affected space */ - address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; - space = check_space(tsk); - /* * Verify that the fault happened in user space, that * we are not in an interrupt and that there is a * user context. */ - if (unlikely(space == 0 || in_atomic() || !mm)) + if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto no_context; + address = trans_exc_code & __FAIL_ADDR_MASK; /* * When we get here, the fault happened in the current * task's user address space, so we can switch on the @@ -315,7 +304,8 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write) goto bad_area; #ifdef CONFIG_S390_EXEC_PROTECT - if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC))) + if (unlikely((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY && + (trans_exc_code & 3) == 0 && !(vma->vm_flags & VM_EXEC))) if (!signal_return(mm, regs, address, error_code)) /* * signal_return() has done an up_read(&mm->mmap_sem) @@ -397,12 +387,14 @@ bad_area: } no_context: - do_no_context(regs, error_code, address); + do_no_context(regs, error_code, trans_exc_code); } void __kprobes do_protection_exception(struct pt_regs *regs, long error_code) { + unsigned long trans_exc_code = S390_lowcore.trans_exc_code; + /* Protection exception is supressing, decrement psw address. 
*/ regs->psw.addr -= (error_code >> 16); /* @@ -410,31 +402,30 @@ void __kprobes do_protection_exception(struct pt_regs *regs, * as a special case because the translation exception code * field is not guaranteed to contain valid data in this case. */ - if (unlikely(!(S390_lowcore.trans_exc_code & 4))) { - do_low_address(regs, error_code); + if (unlikely(!(trans_exc_code & 4))) { + do_low_address(regs, error_code, trans_exc_code); return; } - do_exception(regs, 4, 1); + do_exception(regs, 4, 1, trans_exc_code); } void __kprobes do_dat_exception(struct pt_regs *regs, long error_code) { - do_exception(regs, error_code & 0xff, 0); + do_exception(regs, error_code & 0xff, 0, S390_lowcore.trans_exc_code); } #ifdef CONFIG_64BIT void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code) { + unsigned long trans_exc_code = S390_lowcore.trans_exc_code; struct mm_struct *mm; struct vm_area_struct *vma; unsigned long address; - int space; mm = current->mm; - address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; - space = check_space(current); + address = trans_exc_code & __FAIL_ADDR_MASK; - if (unlikely(space == 0 || in_atomic() || !mm)) + if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto no_context; local_irq_enable(); @@ -457,7 +448,7 @@ void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code) } no_context: - do_no_context(regs, error_code, address); + do_no_context(regs, error_code, trans_exc_code); } #endif -- cgit v1.2.1 From b11b53342773361f3353b285eb6a3fd6074e7997 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 7 Dec 2009 12:51:43 +0100 Subject: [S390] Improve address space mode selection. Introduce user_mode to replace the two variables switch_amode and s390_noexec. There are three valid combinations of the old values: 1) switch_amode == 0 && s390_noexec == 0 2) switch_amode == 1 && s390_noexec == 0 3) switch_amode == 1 && s390_noexec == 1 They get replaced by 1) user_mode == HOME_SPACE_MODE 2) user_mode == PRIMARY_SPACE_MODE 3) user_mode == SECONDARY_SPACE_MODE The new kernel parameter user_mode=[primary,secondary,home] lets you choose the address space mode the user space processes should use. In addition the CONFIG_S390_SWITCH_AMODE config option is removed. Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 15 --------------- arch/s390/defconfig | 1 - arch/s390/include/asm/mmu_context.h | 4 ++-- arch/s390/include/asm/pgalloc.h | 3 ++- arch/s390/include/asm/setup.h | 17 ++++++----------- arch/s390/kernel/setup.c | 36 ++++++++++++++++++++---------------- arch/s390/kernel/vdso.c | 9 +++++---- arch/s390/kvm/Kconfig | 1 - arch/s390/lib/uaccess_mvcos.c | 4 ---- arch/s390/mm/fault.c | 4 ++-- arch/s390/mm/pgtable.c | 2 +- 11 files changed, 38 insertions(+), 58 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 16c673096a22..c80235206c01 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -220,23 +220,8 @@ config AUDIT_ARCH bool default y -config S390_SWITCH_AMODE - bool "Switch kernel/user addressing modes" - help - This option allows to switch the addressing modes of kernel and user - space. The kernel parameter switch_amode=on will enable this feature, - default is disabled. Enabling this (via kernel parameter) on machines - earlier than IBM System z9-109 EC/BC will reduce system performance. - - Note that this option will also be selected by selecting the execute - protection option below. 
Enabling the execute protection via the - noexec kernel parameter will also switch the addressing modes, - independent of the switch_amode kernel parameter. - - config S390_EXEC_PROTECT bool "Data execute protection" - select S390_SWITCH_AMODE help This option allows to enable a buffer overflow protection for user space programs and it also selects the addressing mode option above. diff --git a/arch/s390/defconfig b/arch/s390/defconfig index ab4464486b7a..f4e53c6708dc 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -185,7 +185,6 @@ CONFIG_HOTPLUG_CPU=y CONFIG_COMPAT=y CONFIG_SYSVIPC_COMPAT=y CONFIG_AUDIT_ARCH=y -CONFIG_S390_SWITCH_AMODE=y CONFIG_S390_EXEC_PROTECT=y # diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index fc7edd6f41b6..976e273988c2 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -36,7 +36,7 @@ static inline int init_new_context(struct task_struct *tsk, mm->context.has_pgste = 1; mm->context.alloc_pgste = 1; } else { - mm->context.noexec = s390_noexec; + mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE); mm->context.has_pgste = 0; mm->context.alloc_pgste = 0; } @@ -58,7 +58,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk) pgd_t *pgd = mm->pgd; S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); - if (switch_amode) { + if (user_mode != HOME_SPACE_MODE) { /* Load primary space page table origin. */ pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd; S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd); diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index ddad5903341c..68940d0bad91 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -143,7 +143,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) spin_lock_init(&mm->context.list_lock); INIT_LIST_HEAD(&mm->context.crst_list); INIT_LIST_HEAD(&mm->context.pgtable_list); - return (pgd_t *) crst_table_alloc(mm, s390_noexec); + return (pgd_t *) + crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE); } #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd) diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index e37478e87286..52a779c337e8 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -49,17 +49,12 @@ extern unsigned long memory_end; void detect_memory_layout(struct mem_chunk chunk[]); -#ifdef CONFIG_S390_SWITCH_AMODE -extern unsigned int switch_amode; -#else -#define switch_amode (0) -#endif - -#ifdef CONFIG_S390_EXEC_PROTECT -extern unsigned int s390_noexec; -#else -#define s390_noexec (0) -#endif +#define PRIMARY_SPACE_MODE 0 +#define ACCESS_REGISTER_MODE 1 +#define SECONDARY_SPACE_MODE 2 +#define HOME_SPACE_MODE 3 + +extern unsigned int user_mode; /* * Machine features detected in head.S diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 061479ff029f..0663287fa1b3 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -305,9 +305,8 @@ static int __init early_parse_mem(char *p) } early_param("mem", early_parse_mem); -#ifdef CONFIG_S390_SWITCH_AMODE -unsigned int switch_amode = 0; -EXPORT_SYMBOL_GPL(switch_amode); +unsigned int user_mode = HOME_SPACE_MODE; +EXPORT_SYMBOL_GPL(user_mode); static int set_amode_and_uaccess(unsigned long user_amode, unsigned long user32_amode) @@ -340,23 +339,29 @@ static int set_amode_and_uaccess(unsigned long user_amode, */ static int __init 
early_parse_switch_amode(char *p) { - switch_amode = 1; + if (user_mode != SECONDARY_SPACE_MODE) + user_mode = PRIMARY_SPACE_MODE; return 0; } early_param("switch_amode", early_parse_switch_amode); -#else /* CONFIG_S390_SWITCH_AMODE */ -static inline int set_amode_and_uaccess(unsigned long user_amode, - unsigned long user32_amode) +static int __init early_parse_user_mode(char *p) { + if (p && strcmp(p, "primary") == 0) + user_mode = PRIMARY_SPACE_MODE; +#ifdef CONFIG_S390_EXEC_PROTECT + else if (p && strcmp(p, "secondary") == 0) + user_mode = SECONDARY_SPACE_MODE; +#endif + else if (!p || strcmp(p, "home") == 0) + user_mode = HOME_SPACE_MODE; + else + return 1; return 0; } -#endif /* CONFIG_S390_SWITCH_AMODE */ +early_param("user_mode", early_parse_user_mode); #ifdef CONFIG_S390_EXEC_PROTECT -unsigned int s390_noexec = 0; -EXPORT_SYMBOL_GPL(s390_noexec); - /* * Enable execute protection? */ @@ -364,8 +369,7 @@ static int __init early_parse_noexec(char *p) { if (!strncmp(p, "off", 3)) return 0; - switch_amode = 1; - s390_noexec = 1; + user_mode = SECONDARY_SPACE_MODE; return 0; } early_param("noexec", early_parse_noexec); @@ -373,7 +377,7 @@ early_param("noexec", early_parse_noexec); static void setup_addressing_mode(void) { - if (s390_noexec) { + if (user_mode == SECONDARY_SPACE_MODE) { if (set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY)) pr_info("Execute protection active, " @@ -381,7 +385,7 @@ static void setup_addressing_mode(void) else pr_info("Execute protection active, " "mvcos not available\n"); - } else if (switch_amode) { + } else if (user_mode == PRIMARY_SPACE_MODE) { if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) pr_info("Address spaces switched, " "mvcos available\n"); @@ -411,7 +415,7 @@ setup_lowcore(void) lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) restart_int_handler; - if (switch_amode) + if (user_mode != HOME_SPACE_MODE) lc->restart_psw.mask |= PSW_ASC_HOME; lc->external_new_psw.mask = psw_kernel_bits; lc->external_new_psw.addr = diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index adfb32aa6d59..5f99e66c51c3 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -86,7 +86,8 @@ static void vdso_init_data(struct vdso_data *vd) unsigned int facility_list; facility_list = stfl(); - vd->ectg_available = switch_amode && (facility_list & 1); + vd->ectg_available = + user_mode != HOME_SPACE_MODE && (facility_list & 1); } #ifdef CONFIG_64BIT @@ -114,7 +115,7 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) lowcore->vdso_per_cpu_data = __LC_PASTE; - if (!switch_amode || !vdso_enabled) + if (user_mode == HOME_SPACE_MODE || !vdso_enabled) return 0; segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER); @@ -160,7 +161,7 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) unsigned long segment_table, page_table, page_frame; u32 *psal, *aste; - if (!switch_amode || !vdso_enabled) + if (user_mode == HOME_SPACE_MODE || !vdso_enabled) return; psal = (u32 *)(addr_t) lowcore->paste[4]; @@ -184,7 +185,7 @@ static void __vdso_init_cr5(void *dummy) static void vdso_init_cr5(void) { - if (switch_amode && vdso_enabled) + if (user_mode != HOME_SPACE_MODE && vdso_enabled) on_each_cpu(__vdso_init_cr5, NULL, 1); } #endif /* CONFIG_64BIT */ diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index bf164fc21864..6ee55ae84ce2 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -20,7 +20,6 @@ config KVM depends on HAVE_KVM && 
EXPERIMENTAL select PREEMPT_NOTIFIERS select ANON_INODES - select S390_SWITCH_AMODE ---help--- Support hosting paravirtualized guest machines using the SIE virtualization capability on the mainframe. This should work diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c index 58da3f461214..60455f104ea3 100644 --- a/arch/s390/lib/uaccess_mvcos.c +++ b/arch/s390/lib/uaccess_mvcos.c @@ -162,7 +162,6 @@ static size_t clear_user_mvcos(size_t size, void __user *to) return size; } -#ifdef CONFIG_S390_SWITCH_AMODE static size_t strnlen_user_mvcos(size_t count, const char __user *src) { char buf[256]; @@ -200,7 +199,6 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src, } while ((len_str == len) && (done < count)); return done; } -#endif /* CONFIG_S390_SWITCH_AMODE */ struct uaccess_ops uaccess_mvcos = { .copy_from_user = copy_from_user_mvcos_check, @@ -215,7 +213,6 @@ struct uaccess_ops uaccess_mvcos = { .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std, }; -#ifdef CONFIG_S390_SWITCH_AMODE struct uaccess_ops uaccess_mvcos_switch = { .copy_from_user = copy_from_user_mvcos, .copy_from_user_small = copy_from_user_mvcos, @@ -228,4 +225,3 @@ struct uaccess_ops uaccess_mvcos_switch = { .futex_atomic_op = futex_atomic_op_pt, .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt, }; -#endif diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 3df5b918cfe2..77108e34fc1a 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -112,7 +112,7 @@ static inline int user_space_fault(unsigned long trans_exc_code) if (trans_exc_code == 2) /* Access via secondary space, set_fs setting decides */ return current->thread.mm_segment.ar4; - if (!switch_amode) + if (user_mode == HOME_SPACE_MODE) /* User space if the access has been done via home space. */ return trans_exc_code == 3; /* @@ -168,7 +168,7 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code, * terminate things with extreme prejudice. */ address = trans_exc_code & __FAIL_ADDR_MASK; - if (user_space_fault(trans_exc_code) == 0) + if (!user_space_fault(trans_exc_code)) printk(KERN_ALERT "Unable to handle kernel pointer dereference" " at virtual kernel address %p\n", (void *)address); else diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 2757c5616a07..ad621e06ada3 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -269,7 +269,7 @@ int s390_enable_sie(void) struct mm_struct *mm, *old_mm; /* Do we have switched amode? If no, we cannot do sie */ - if (!switch_amode) + if (user_mode == HOME_SPACE_MODE) return -EINVAL; /* Do we have pgstes? if yes, we are done */ -- cgit v1.2.1 From 7ecb344ae80bc03397ded3b004e06ecfe32becf9 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 7 Dec 2009 12:51:44 +0100 Subject: [S390] Improve notify_page_fault implementation. notify_page_fault does a preempt_disable/preempt_enable for each fault generated by a kernel access to user space. If kprobes is not active that is unnecessary since the interrupts are not reenabled yet. To play safe repeat the kprobe_running check after preempt_disable(). 
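A hedged sketch of the pattern described above, not necessarily the literal hunk below: the first kprobe_running() test runs while interrupts are still disabled, so the CPU cannot change underneath it and the preempt_disable()/preempt_enable() pair is only paid for when a kprobe is actually active; the test is repeated once preemption is disabled just to be safe. The trap number 14 is assumed to match the existing helper.

        if (!user_mode(regs) && kprobe_running()) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
                preempt_enable();
        }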
Signed-off-by: Martin Schwidefsky --- arch/s390/mm/fault.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 77108e34fc1a..fd72c269cdb4 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -52,11 +52,11 @@ extern int sysctl_userprocess_debug; #endif -#ifdef CONFIG_KPROBES -static inline int notify_page_fault(struct pt_regs *regs, long err) +static inline int notify_page_fault(struct pt_regs *regs) { int ret = 0; +#ifdef CONFIG_KPROBES /* kprobe_running() needs smp_processor_id() */ if (!user_mode(regs)) { preempt_disable(); @@ -64,15 +64,9 @@ static inline int notify_page_fault(struct pt_regs *regs, long err) ret = 1; preempt_enable(); } - +#endif return ret; } -#else -static inline int notify_page_fault(struct pt_regs *regs, long err) -{ - return 0; -} -#endif /* @@ -274,7 +268,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write, int si_code; int fault; - if (notify_page_fault(regs, error_code)) + if (notify_page_fault(regs)) return; tsk = current; -- cgit v1.2.1 From 50d7280d430484a890ddcadc7f738b5b6dd28bf1 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 7 Dec 2009 12:51:45 +0100 Subject: [S390] fault handler performance optimization. Slim down the do_exception function to handle only the fast path of a fault and move the exceptional cases into a new function. That slightly increases the performance of the fault handling. Build fix for !CONFIG_COMPAT by Kamalesh Babulal Signed-off-by: Martin Schwidefsky --- arch/s390/mm/fault.c | 258 +++++++++++++++++++++++++-------------------------- 1 file changed, 129 insertions(+), 129 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index fd72c269cdb4..0dcfcfb5b5be 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -34,16 +34,15 @@ #include #include #include +#include #include "../kernel/entry.h" #ifndef CONFIG_64BIT #define __FAIL_ADDR_MASK 0x7ffff000 -#define __FIXUP_MASK 0x7fffffff #define __SUBCODE_MASK 0x0200 #define __PF_RES_FIELD 0ULL #else /* CONFIG_64BIT */ #define __FAIL_ADDR_MASK -4096L -#define __FIXUP_MASK ~0L #define __SUBCODE_MASK 0x0600 #define __PF_RES_FIELD 0x8000000000000000ULL #endif /* CONFIG_64BIT */ @@ -52,6 +51,10 @@ extern int sysctl_userprocess_debug; #endif +#define VM_FAULT_BADCONTEXT 0x010000 +#define VM_FAULT_BADMAP 0x020000 +#define VM_FAULT_BADACCESS 0x040000 + static inline int notify_page_fault(struct pt_regs *regs) { int ret = 0; @@ -122,18 +125,22 @@ static inline int user_space_fault(unsigned long trans_exc_code) * Send SIGSEGV to task. This is an external routine * to keep the stack usage of do_page_fault small. 
*/ -static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, - int si_code, unsigned long address) +static noinline void do_sigsegv(struct pt_regs *regs, long int_code, + int si_code, unsigned long trans_exc_code) { struct siginfo si; + unsigned long address; + address = trans_exc_code & __FAIL_ADDR_MASK; + current->thread.prot_addr = address; + current->thread.trap_no = int_code; #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) #if defined(CONFIG_SYSCTL) if (sysctl_userprocess_debug) #endif { printk("User process fault: interruption code 0x%lX\n", - error_code); + int_code); printk("failing address: %lX\n", address); show_regs(regs); } @@ -144,14 +151,14 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, force_sig_info(SIGSEGV, &si, current); } -static void do_no_context(struct pt_regs *regs, unsigned long error_code, - unsigned long trans_exc_code) +static noinline void do_no_context(struct pt_regs *regs, long int_code, + unsigned long trans_exc_code) { const struct exception_table_entry *fixup; unsigned long address; /* Are we prepared to handle this kernel fault? */ - fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK); + fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); if (fixup) { regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; return; @@ -169,107 +176,127 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code, printk(KERN_ALERT "Unable to handle kernel paging request" " at virtual user address %p\n", (void *)address); - die("Oops", regs, error_code); + die("Oops", regs, int_code); do_exit(SIGKILL); } -static void do_low_address(struct pt_regs *regs, unsigned long error_code, - unsigned long trans_exc_code) +static noinline void do_low_address(struct pt_regs *regs, long int_code, + unsigned long trans_exc_code) { /* Low-address protection hit in kernel mode means NULL pointer write access in kernel mode. */ if (regs->psw.mask & PSW_MASK_PSTATE) { /* Low-address protection hit in user mode 'cannot happen'. */ - die ("Low-address protection", regs, error_code); + die ("Low-address protection", regs, int_code); do_exit(SIGKILL); } - do_no_context(regs, error_code, trans_exc_code); + do_no_context(regs, int_code, trans_exc_code); } -static void do_sigbus(struct pt_regs *regs, unsigned long error_code, - unsigned long trans_exc_code) +static noinline void do_sigbus(struct pt_regs *regs, long int_code, + unsigned long trans_exc_code) { struct task_struct *tsk = current; - struct mm_struct *mm = tsk->mm; - up_read(&mm->mmap_sem); /* * Send a sigbus, regardless of whether we were in kernel * or user mode. */ tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK; - tsk->thread.trap_no = error_code; + tsk->thread.trap_no = int_code; force_sig(SIGBUS, tsk); - - /* Kernel mode? 
Handle exceptions or die */ - if (!(regs->psw.mask & PSW_MASK_PSTATE)) - do_no_context(regs, error_code, trans_exc_code); } #ifdef CONFIG_S390_EXEC_PROTECT -static int signal_return(struct mm_struct *mm, struct pt_regs *regs, - unsigned long address, unsigned long error_code) +static noinline int signal_return(struct pt_regs *regs, long int_code, + unsigned long trans_exc_code) { u16 instruction; int rc; -#ifdef CONFIG_COMPAT - int compat; -#endif - pagefault_disable(); rc = __get_user(instruction, (u16 __user *) regs->psw.addr); - pagefault_enable(); - if (rc) - return -EFAULT; - up_read(&mm->mmap_sem); - clear_tsk_thread_flag(current, TIF_SINGLE_STEP); -#ifdef CONFIG_COMPAT - compat = is_compat_task(); - if (compat && instruction == 0x0a77) - sys32_sigreturn(); - else if (compat && instruction == 0x0aad) - sys32_rt_sigreturn(); - else -#endif - if (instruction == 0x0a77) - sys_sigreturn(); - else if (instruction == 0x0aad) - sys_rt_sigreturn(); - else { - current->thread.prot_addr = address; - current->thread.trap_no = error_code; - do_sigsegv(regs, error_code, SEGV_MAPERR, address); - } + if (!rc && instruction == 0x0a77) { + clear_tsk_thread_flag(current, TIF_SINGLE_STEP); + if (is_compat_task()) + sys32_sigreturn(); + else + sys_sigreturn(); + } else if (!rc && instruction == 0x0aad) { + clear_tsk_thread_flag(current, TIF_SINGLE_STEP); + if (is_compat_task()) + sys32_rt_sigreturn(); + else + sys_rt_sigreturn(); + } else + do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code); return 0; } #endif /* CONFIG_S390_EXEC_PROTECT */ +static noinline void do_fault_error(struct pt_regs *regs, long int_code, + unsigned long trans_exc_code, int fault) +{ + int si_code; + + switch (fault) { + case VM_FAULT_BADACCESS: +#ifdef CONFIG_S390_EXEC_PROTECT + if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY && + (trans_exc_code & 3) == 0) { + signal_return(regs, int_code, trans_exc_code); + break; + } +#endif /* CONFIG_S390_EXEC_PROTECT */ + case VM_FAULT_BADMAP: + /* Bad memory access. Check if it is kernel or user space. */ + if (regs->psw.mask & PSW_MASK_PSTATE) { + /* User mode accesses just cause a SIGSEGV */ + si_code = (fault == VM_FAULT_BADMAP) ? + SEGV_MAPERR : SEGV_ACCERR; + do_sigsegv(regs, int_code, si_code, trans_exc_code); + return; + } + case VM_FAULT_BADCONTEXT: + do_no_context(regs, int_code, trans_exc_code); + break; + default: /* fault & VM_FAULT_ERROR */ + if (fault & VM_FAULT_OOM) + pagefault_out_of_memory(); + else if (fault & VM_FAULT_SIGBUS) { + do_sigbus(regs, int_code, trans_exc_code); + /* Kernel mode? Handle exceptions or die */ + if (!(regs->psw.mask & PSW_MASK_PSTATE)) + do_no_context(regs, int_code, trans_exc_code); + } else + BUG(); + break; + } +} + /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. * - * error_code: + * interruption code (int_code): * 04 Protection -> Write-Protection (suprression) * 10 Segment translation -> Not present (nullification) * 11 Page translation -> Not present (nullification) * 3b Region third trans. 
-> Not present (nullification) */ -static inline void -do_exception(struct pt_regs *regs, unsigned long error_code, int write, - unsigned long trans_exc_code) +static inline int do_exception(struct pt_regs *regs, int write, + unsigned long trans_exc_code) { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; unsigned long address; - int si_code; int fault; if (notify_page_fault(regs)) - return; + return 0; tsk = current; mm = tsk->mm; @@ -279,8 +306,9 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write, * we are not in an interrupt and that there is a * user context. */ + fault = VM_FAULT_BADCONTEXT; if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) - goto no_context; + goto out; address = trans_exc_code & __FAIL_ADDR_MASK; /* @@ -292,41 +320,35 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); down_read(&mm->mmap_sem); - si_code = SEGV_MAPERR; + fault = VM_FAULT_BADMAP; vma = find_vma(mm, address); if (!vma) - goto bad_area; + goto out_up; + if (unlikely(vma->vm_start > address)) { + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto out_up; + if (expand_stack(vma, address)) + goto out_up; + } + + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ + fault = VM_FAULT_BADACCESS; #ifdef CONFIG_S390_EXEC_PROTECT if (unlikely((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY && (trans_exc_code & 3) == 0 && !(vma->vm_flags & VM_EXEC))) - if (!signal_return(mm, regs, address, error_code)) - /* - * signal_return() has done an up_read(&mm->mmap_sem) - * if it returns 0. - */ - return; + goto out_up; #endif - - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; -/* - * Ok, we have a good vm_area for this memory access, so - * we can handle it.. - */ -good_area: - si_code = SEGV_ACCERR; if (!write) { /* page not present, check vm flags */ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) - goto bad_area; + goto out_up; } else { if (!(vma->vm_flags & VM_WRITE)) - goto bad_area; + goto out_up; } if (is_vm_hugetlb_page(vma)) @@ -337,17 +359,9 @@ good_area: * the fault. */ fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); - if (unlikely(fault & VM_FAULT_ERROR)) { - if (fault & VM_FAULT_OOM) { - up_read(&mm->mmap_sem); - pagefault_out_of_memory(); - return; - } else if (fault & VM_FAULT_SIGBUS) { - do_sigbus(regs, error_code, address); - return; - } - BUG(); - } + if (unlikely(fault & VM_FAULT_ERROR)) + goto out_up; + if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, @@ -357,67 +371,55 @@ good_area: perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } - up_read(&mm->mmap_sem); /* * The instruction that caused the program check will * be repeated. Don't signal single step via SIGTRAP. */ clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP); - return; - -/* - * Something tried to access memory that isn't in our memory map.. - * Fix it, but check if it's kernel or user first.. 
- */ -bad_area: + fault = 0; +out_up: up_read(&mm->mmap_sem); - - /* User mode accesses just cause a SIGSEGV */ - if (regs->psw.mask & PSW_MASK_PSTATE) { - tsk->thread.prot_addr = address; - tsk->thread.trap_no = error_code; - do_sigsegv(regs, error_code, si_code, address); - return; - } - -no_context: - do_no_context(regs, error_code, trans_exc_code); +out: + return fault; } -void __kprobes do_protection_exception(struct pt_regs *regs, - long error_code) +void __kprobes do_protection_exception(struct pt_regs *regs, long int_code) { unsigned long trans_exc_code = S390_lowcore.trans_exc_code; + int fault; /* Protection exception is supressing, decrement psw address. */ - regs->psw.addr -= (error_code >> 16); + regs->psw.addr -= (int_code >> 16); /* * Check for low-address protection. This needs to be treated * as a special case because the translation exception code * field is not guaranteed to contain valid data in this case. */ if (unlikely(!(trans_exc_code & 4))) { - do_low_address(regs, error_code, trans_exc_code); + do_low_address(regs, int_code, trans_exc_code); return; } - do_exception(regs, 4, 1, trans_exc_code); + fault = do_exception(regs, 1, trans_exc_code); + if (unlikely(fault)) + do_fault_error(regs, 4, trans_exc_code, fault); } -void __kprobes do_dat_exception(struct pt_regs *regs, long error_code) +void __kprobes do_dat_exception(struct pt_regs *regs, long int_code) { - do_exception(regs, error_code & 0xff, 0, S390_lowcore.trans_exc_code); + unsigned long trans_exc_code = S390_lowcore.trans_exc_code; + int fault; + + fault = do_exception(regs, 0, trans_exc_code); + if (unlikely(fault)) + do_fault_error(regs, int_code & 255, trans_exc_code, fault); } #ifdef CONFIG_64BIT -void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code) +void __kprobes do_asce_exception(struct pt_regs *regs, long int_code) { unsigned long trans_exc_code = S390_lowcore.trans_exc_code; - struct mm_struct *mm; + struct mm_struct *mm = current->mm; struct vm_area_struct *vma; - unsigned long address; - - mm = current->mm; - address = trans_exc_code & __FAIL_ADDR_MASK; if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto no_context; @@ -425,7 +427,7 @@ void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code) local_irq_enable(); down_read(&mm->mmap_sem); - vma = find_vma(mm, address); + vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK); up_read(&mm->mmap_sem); if (vma) { @@ -435,14 +437,12 @@ void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code) /* User mode accesses just cause a SIGSEGV */ if (regs->psw.mask & PSW_MASK_PSTATE) { - current->thread.prot_addr = address; - current->thread.trap_no = error_code; - do_sigsegv(regs, error_code, SEGV_MAPERR, address); + do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code); return; } no_context: - do_no_context(regs, error_code, trans_exc_code); + do_no_context(regs, int_code, trans_exc_code); } #endif @@ -507,7 +507,7 @@ void pfault_fini(void) : : "a" (&refbk), "m" (refbk) : "cc"); } -static void pfault_interrupt(__u16 error_code) +static void pfault_interrupt(__u16 int_code) { struct task_struct *tsk; __u16 subcode; -- cgit v1.2.1 From 1ab947de293f43812276b60cf9fa21127e7a5bb2 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 7 Dec 2009 12:51:46 +0100 Subject: [S390] fault handler access flags check. Simplify the check of the vma->flags in do_exception for the different fault types. 
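In outline (a preview of the hunks below, slightly abbreviated): each caller passes the VM_* rights the faulting access needs, and the per-fault-type special cases inside do_exception() collapse into one mask test.

        /* protection exception: a write was attempted */
        fault = do_exception(regs, VM_WRITE, trans_exc_code);

        /* plain DAT exception: any of read, write or execute */
        fault = do_exception(regs, VM_READ | VM_EXEC | VM_WRITE, trans_exc_code);

        /* inside do_exception() */
        fault = VM_FAULT_BADACCESS;
        if (unlikely(!(vma->vm_flags & access)))
                goto out_up;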
Signed-off-by: Martin Schwidefsky --- arch/s390/mm/fault.c | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 0dcfcfb5b5be..5a9e9a77dc16 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -286,7 +286,7 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code, * 11 Page translation -> Not present (nullification) * 3b Region third trans. -> Not present (nullification) */ -static inline int do_exception(struct pt_regs *regs, int write, +static inline int do_exception(struct pt_regs *regs, int access, unsigned long trans_exc_code) { struct task_struct *tsk; @@ -337,19 +337,8 @@ static inline int do_exception(struct pt_regs *regs, int write, * we can handle it.. */ fault = VM_FAULT_BADACCESS; -#ifdef CONFIG_S390_EXEC_PROTECT - if (unlikely((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY && - (trans_exc_code & 3) == 0 && !(vma->vm_flags & VM_EXEC))) + if (unlikely(!(vma->vm_flags & access))) goto out_up; -#endif - if (!write) { - /* page not present, check vm flags */ - if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) - goto out_up; - } else { - if (!(vma->vm_flags & VM_WRITE)) - goto out_up; - } if (is_vm_hugetlb_page(vma)) address &= HPAGE_MASK; @@ -358,7 +347,8 @@ static inline int do_exception(struct pt_regs *regs, int write, * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); + fault = handle_mm_fault(mm, vma, address, + (access == VM_WRITE) ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) goto out_up; @@ -399,7 +389,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long int_code) do_low_address(regs, int_code, trans_exc_code); return; } - fault = do_exception(regs, 1, trans_exc_code); + fault = do_exception(regs, VM_WRITE, trans_exc_code); if (unlikely(fault)) do_fault_error(regs, 4, trans_exc_code, fault); } @@ -407,9 +397,15 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long int_code) void __kprobes do_dat_exception(struct pt_regs *regs, long int_code) { unsigned long trans_exc_code = S390_lowcore.trans_exc_code; - int fault; + int access, fault; - fault = do_exception(regs, 0, trans_exc_code); + access = VM_READ | VM_EXEC | VM_WRITE; +#ifdef CONFIG_S390_EXEC_PROTECT + if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY && + (trans_exc_code & 3) == 0) + access = VM_EXEC; +#endif + fault = do_exception(regs, access, trans_exc_code); if (unlikely(fault)) do_fault_error(regs, int_code & 255, trans_exc_code, fault); } -- cgit v1.2.1 From 6c1e3e79430615d0472dbf9f8fed89c571e66423 Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Mon, 7 Dec 2009 12:51:47 +0100 Subject: [S390] Use do_exception() in pagetable walk usercopy functions. The pagetable walk usercopy functions have used a modified copy of the do_exception() function for fault handling. This lead to inconsistencies with recent changes to do_exception(), e.g. performance counters. This patch changes the pagetable walk usercopy code to call do_exception() directly, eliminating the redundancy. A new parameter is added to do_exception() to specify the fault address. 
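One detail worth calling out, taken from the hunks below: follow_table() no longer returns NULL on failure but a small pseudo translation-exception code naming the level that failed, so the usercopy path can feed that value directly into the common fault handling. Roughly:

        pte = follow_table(mm, uaddr);          /* 0x3a/0x3b region, 0x10 segment */
        if ((unsigned long) pte < 0x1000)       /* an error code, not a pointer */
                goto fault;                     /* __handle_fault(uaddr, (long) pte, ...) */
        if (!pte_present(*pte)) {
                pte = (pte_t *) 0x11;           /* page-translation exception */
                goto fault;
        }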
Signed-off-by: Gerald Schaefer Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/uaccess.h | 2 + arch/s390/lib/uaccess_pt.c | 147 ++++++++++++++-------------------------- arch/s390/mm/fault.c | 23 +++++++ 3 files changed, 76 insertions(+), 96 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 8377e91533d2..cbf0a8745bf4 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -93,6 +93,8 @@ extern struct uaccess_ops uaccess_mvcos; extern struct uaccess_ops uaccess_mvcos_switch; extern struct uaccess_ops uaccess_pt; +extern int __handle_fault(unsigned long, unsigned long, int); + static inline int __put_user_fn(size_t size, void __user *ptr, void *x) { size = uaccess.copy_to_user_small(size, ptr, x); diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index cb5d59eab0ee..404f2de296dc 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -23,86 +23,21 @@ static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr) pgd = pgd_offset(mm, addr); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) - return NULL; + return (pte_t *) 0x3a; pud = pud_offset(pgd, addr); if (pud_none(*pud) || unlikely(pud_bad(*pud))) - return NULL; + return (pte_t *) 0x3b; pmd = pmd_offset(pud, addr); if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - return NULL; + return (pte_t *) 0x10; return pte_offset_map(pmd, addr); } -static int __handle_fault(struct mm_struct *mm, unsigned long address, - int write_access) -{ - struct vm_area_struct *vma; - int ret = -EFAULT; - int fault; - - if (in_atomic()) - return ret; - down_read(&mm->mmap_sem); - vma = find_vma(mm, address); - if (unlikely(!vma)) - goto out; - if (unlikely(vma->vm_start > address)) { - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto out; - if (expand_stack(vma, address)) - goto out; - } - - if (!write_access) { - /* page not present, check vm flags */ - if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) - goto out; - } else { - if (!(vma->vm_flags & VM_WRITE)) - goto out; - } - -survive: - fault = handle_mm_fault(mm, vma, address, write_access ? 
FAULT_FLAG_WRITE : 0); - if (unlikely(fault & VM_FAULT_ERROR)) { - if (fault & VM_FAULT_OOM) - goto out_of_memory; - else if (fault & VM_FAULT_SIGBUS) - goto out_sigbus; - BUG(); - } - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; - ret = 0; -out: - up_read(&mm->mmap_sem); - return ret; - -out_of_memory: - up_read(&mm->mmap_sem); - if (is_global_init(current)) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } - printk("VM: killing process %s\n", current->comm); - return ret; - -out_sigbus: - up_read(&mm->mmap_sem); - current->thread.prot_addr = address; - current->thread.trap_no = 0x11; - force_sig(SIGBUS, current); - return ret; -} - -static size_t __user_copy_pt(unsigned long uaddr, void *kptr, - size_t n, int write_user) +static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, + size_t n, int write_user) { struct mm_struct *mm = current->mm; unsigned long offset, pfn, done, size; @@ -114,12 +49,17 @@ retry: spin_lock(&mm->page_table_lock); do { pte = follow_table(mm, uaddr); - if (!pte || !pte_present(*pte) || - (write_user && !pte_write(*pte))) + if ((unsigned long) pte < 0x1000) goto fault; + if (!pte_present(*pte)) { + pte = (pte_t *) 0x11; + goto fault; + } else if (write_user && !pte_write(*pte)) { + pte = (pte_t *) 0x04; + goto fault; + } pfn = pte_pfn(*pte); - offset = uaddr & (PAGE_SIZE - 1); size = min(n - done, PAGE_SIZE - offset); if (write_user) { @@ -137,7 +77,7 @@ retry: return n - done; fault: spin_unlock(&mm->page_table_lock); - if (__handle_fault(mm, uaddr, write_user)) + if (__handle_fault(uaddr, (unsigned long) pte, write_user)) return n - done; goto retry; } @@ -146,30 +86,31 @@ fault: * Do DAT for user address by page table walk, return kernel address. * This function needs to be called with current->mm->page_table_lock held. 
*/ -static unsigned long __dat_user_addr(unsigned long uaddr) +static __always_inline unsigned long __dat_user_addr(unsigned long uaddr) { struct mm_struct *mm = current->mm; - unsigned long pfn, ret; + unsigned long pfn; pte_t *pte; int rc; - ret = 0; retry: pte = follow_table(mm, uaddr); - if (!pte || !pte_present(*pte)) + if ((unsigned long) pte < 0x1000) goto fault; + if (!pte_present(*pte)) { + pte = (pte_t *) 0x11; + goto fault; + } pfn = pte_pfn(*pte); - ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1)); -out: - return ret; + return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1)); fault: spin_unlock(&mm->page_table_lock); - rc = __handle_fault(mm, uaddr, 0); + rc = __handle_fault(uaddr, (unsigned long) pte, 0); spin_lock(&mm->page_table_lock); - if (rc) - goto out; - goto retry; + if (!rc) + goto retry; + return 0; } size_t copy_from_user_pt(size_t n, const void __user *from, void *to) @@ -234,8 +175,12 @@ retry: spin_lock(&mm->page_table_lock); do { pte = follow_table(mm, uaddr); - if (!pte || !pte_present(*pte)) + if ((unsigned long) pte < 0x1000) + goto fault; + if (!pte_present(*pte)) { + pte = (pte_t *) 0x11; goto fault; + } pfn = pte_pfn(*pte); offset = uaddr & (PAGE_SIZE-1); @@ -249,9 +194,8 @@ retry: return done + 1; fault: spin_unlock(&mm->page_table_lock); - if (__handle_fault(mm, uaddr, 0)) { + if (__handle_fault(uaddr, (unsigned long) pte, 0)) return 0; - } goto retry; } @@ -284,7 +228,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to, { struct mm_struct *mm = current->mm; unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to, - uaddr, done, size; + uaddr, done, size, error_code; unsigned long uaddr_from = (unsigned long) from; unsigned long uaddr_to = (unsigned long) to; pte_t *pte_from, *pte_to; @@ -298,17 +242,28 @@ static size_t copy_in_user_pt(size_t n, void __user *to, retry: spin_lock(&mm->page_table_lock); do { + write_user = 0; + uaddr = uaddr_from; pte_from = follow_table(mm, uaddr_from); - if (!pte_from || !pte_present(*pte_from)) { - uaddr = uaddr_from; - write_user = 0; + error_code = (unsigned long) pte_from; + if (error_code < 0x1000) + goto fault; + if (!pte_present(*pte_from)) { + error_code = 0x11; goto fault; } + write_user = 1; + uaddr = uaddr_to; pte_to = follow_table(mm, uaddr_to); - if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) { - uaddr = uaddr_to; - write_user = 1; + error_code = (unsigned long) pte_to; + if (error_code < 0x1000) + goto fault; + if (!pte_present(*pte_to)) { + error_code = 0x11; + goto fault; + } else if (!pte_write(*pte_to)) { + error_code = 0x04; goto fault; } @@ -329,7 +284,7 @@ retry: return n - done; fault: spin_unlock(&mm->page_table_lock); - if (__handle_fault(mm, uaddr, write_user)) + if (__handle_fault(uaddr, error_code, write_user)) return n - done; goto retry; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 5a9e9a77dc16..fc102e70d9c2 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -442,6 +442,29 @@ no_context: } #endif +int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user) +{ + struct pt_regs regs; + int access, fault; + + regs.psw.mask = psw_kernel_bits; + if (!irqs_disabled()) + regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; + regs.psw.addr = (unsigned long) __builtin_return_address(0); + regs.psw.addr |= PSW_ADDR_AMODE; + uaddr &= PAGE_MASK; + access = write_user ? 
VM_WRITE : VM_READ; + fault = do_exception(®s, access, uaddr | 2); + if (unlikely(fault)) { + if (fault & VM_FAULT_OOM) { + pagefault_out_of_memory(); + fault = 0; + } else if (fault & VM_FAULT_SIGBUS) + do_sigbus(®s, int_code, uaddr); + } + return fault ? -EFAULT : 0; +} + #ifdef CONFIG_PFAULT /* * 'pfault' pseudo page faults routines. -- cgit v1.2.1 From 779c27820a6bd53523a34551aa6004045a060dcf Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 7 Dec 2009 12:51:49 +0100 Subject: [S390] dasd: remove dead code the todclk.h header file is dead code. Remove it. Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/todclk.h | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100644 arch/s390/include/asm/todclk.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/todclk.h b/arch/s390/include/asm/todclk.h deleted file mode 100644 index c7f62055488a..000000000000 --- a/arch/s390/include/asm/todclk.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * File...........: linux/include/asm/todclk.h - * Author(s)......: Holger Smolinski - * Bugreports.to..: - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 - * - * History of changes (starts July 2000) - */ - -#ifndef __ASM_TODCLK_H -#define __ASM_TODCLK_H - -#ifdef __KERNEL__ - -#define TOD_uSEC (0x1000ULL) -#define TOD_mSEC (1000 * TOD_uSEC) -#define TOD_SEC (1000 * TOD_mSEC) -#define TOD_MIN (60 * TOD_SEC) -#define TOD_HOUR (60 * TOD_MIN) - -#endif - -#endif -- cgit v1.2.1 From 39475179d40996b4efa662e3825735a84d2526d1 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 7 Dec 2009 12:52:05 +0100 Subject: [S390] Improve code generated by atomic operations. Git commit ea435467500612636f8f4fb639ff6e76b2496e4b changed the definition of atomic_t and atomic64_t for s390 by adding the volatile modifier to the counter field. This has an unfortunate side effect with newer versions of the gcc. The typeof operator now picks up the volatile modifier from the expression. This causes the compiler to think that it has to store the two temporary variable old_val and new_val in the __CS_LOOP for the different atomic operations to the stack as the variables are now volatile. Both stores are superfluous. The hack to replace typeof(ptr->counter) with int in __CS_LOOP and and long long in __CSG_LOOP avoids the two stores. A better solution would be to drop the volatile from the counter field of the atomic_t and atomic64_t definition. But that is a touchy subject .. 
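A self-contained way to see the effect (user-space sketch with GCC builtins, not the kernel code; demo_atomic_t and the function names are made up): with a volatile counter field, typeof() carries the qualifier over to the temporaries, so the compiler has to store old_val and new_val to memory on every loop iteration, while plain int temporaries may stay in registers.

typedef struct { volatile int counter; } demo_atomic_t;

int add_with_typeof(demo_atomic_t *v, int i)
{
        typeof(v->counter) old_val, new_val;    /* both are volatile int */

        do {
                old_val = v->counter;
                new_val = old_val + i;
        } while (!__sync_bool_compare_and_swap(&v->counter, old_val, new_val));
        return new_val;
}

int add_with_int(demo_atomic_t *v, int i)
{
        int old_val, new_val;                   /* plain ints */

        do {
                old_val = v->counter;
                new_val = old_val + i;
        } while (!__sync_bool_compare_and_swap(&v->counter, old_val, new_val));
        return new_val;
}

Comparing the generated assembly of the two functions (e.g. with gcc -O2 -S) shows the superfluous stores in the first variant; dropping volatile from the counter definition would make the typeof() version equivalent to the second one.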
Cc: Matthew Wilcox Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/atomic.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index ae7c8f9f94a5..2a113d6a7dfd 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -21,7 +21,7 @@ #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) #define __CS_LOOP(ptr, op_val, op_string) ({ \ - typeof(ptr->counter) old_val, new_val; \ + int old_val, new_val; \ asm volatile( \ " l %0,%2\n" \ "0: lr %1,%0\n" \ @@ -38,7 +38,7 @@ #else /* __GNUC__ */ #define __CS_LOOP(ptr, op_val, op_string) ({ \ - typeof(ptr->counter) old_val, new_val; \ + int old_val, new_val; \ asm volatile( \ " l %0,0(%3)\n" \ "0: lr %1,%0\n" \ @@ -143,7 +143,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) #define __CSG_LOOP(ptr, op_val, op_string) ({ \ - typeof(ptr->counter) old_val, new_val; \ + long long old_val, new_val; \ asm volatile( \ " lg %0,%2\n" \ "0: lgr %1,%0\n" \ @@ -160,7 +160,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #else /* __GNUC__ */ #define __CSG_LOOP(ptr, op_val, op_string) ({ \ - typeof(ptr->counter) old_val, new_val; \ + long long old_val, new_val; \ asm volatile( \ " lg %0,0(%3)\n" \ "0: lgr %1,%0\n" \ -- cgit v1.2.1 From 52b169c864ea8622c4755172844fd24168c81195 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 7 Dec 2009 12:52:06 +0100 Subject: [S390] cmm: free pages on hibernate. The pages allocated by the cmm memory balloon should be freed before the hibernation image is created. Otherwise the memory reserved by the balloon gets written to the swap device but there is no content in these pages that need to be preserved. 
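The mechanism in outline (the full hunks follow): a power-management notifier frees both ballooned page lists on PM_HIBERNATION_PREPARE and kicks the balloon thread again on PM_POST_HIBERNATION, while a cmm_suspended flag keeps cmm_thread from re-allocating pages in between.

static int cmm_power_event(struct notifier_block *this,
                           unsigned long event, void *ptr)
{
        switch (event) {
        case PM_HIBERNATION_PREPARE:    /* before the image is written */
                return cmm_suspend();   /* free both page lists */
        case PM_POST_HIBERNATION:       /* after resume or a failed hibernate */
                return cmm_resume();    /* clear the flag, kick the thread */
        default:
                return NOTIFY_DONE;
        }
}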
Signed-off-by: Martin Schwidefsky --- arch/s390/mm/cmm.c | 61 ++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 50 insertions(+), 11 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index b201135cc18c..5e5f3849660f 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -44,6 +45,7 @@ static volatile long cmm_pages_target; static volatile long cmm_timed_pages_target; static long cmm_timeout_pages; static long cmm_timeout_seconds; +static int cmm_suspended; static struct cmm_page_array *cmm_page_list; static struct cmm_page_array *cmm_timed_page_list; @@ -147,9 +149,9 @@ cmm_thread(void *dummy) while (1) { rc = wait_event_interruptible(cmm_thread_wait, - (cmm_pages != cmm_pages_target || - cmm_timed_pages != cmm_timed_pages_target || - kthread_should_stop())); + (!cmm_suspended && (cmm_pages != cmm_pages_target || + cmm_timed_pages != cmm_timed_pages_target)) || + kthread_should_stop()); if (kthread_should_stop() || rc == -ERESTARTSYS) { cmm_pages_target = cmm_pages; cmm_timed_pages_target = cmm_timed_pages; @@ -411,6 +413,38 @@ cmm_smsg_target(char *from, char *msg) static struct ctl_table_header *cmm_sysctl_header; +static int cmm_suspend(void) +{ + cmm_suspended = 1; + cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list); + cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list); + return 0; +} + +static int cmm_resume(void) +{ + cmm_suspended = 0; + cmm_kick_thread(); + return 0; +} + +static int cmm_power_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + switch (event) { + case PM_POST_HIBERNATION: + return cmm_resume(); + case PM_HIBERNATION_PREPARE: + return cmm_suspend(); + default: + return NOTIFY_DONE; + } +} + +static struct notifier_block cmm_power_notifier = { + .notifier_call = cmm_power_event, +}; + static int cmm_init (void) { @@ -419,7 +453,7 @@ cmm_init (void) #ifdef CONFIG_CMM_PROC cmm_sysctl_header = register_sysctl_table(cmm_dir_table); if (!cmm_sysctl_header) - goto out; + goto out_sysctl; #endif #ifdef CONFIG_CMM_IUCV rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target); @@ -429,17 +463,21 @@ cmm_init (void) rc = register_oom_notifier(&cmm_oom_nb); if (rc < 0) goto out_oom_notify; + rc = register_pm_notifier(&cmm_power_notifier); + if (rc) + goto out_pm; init_waitqueue_head(&cmm_thread_wait); init_timer(&cmm_timer); cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0; - if (!rc) - goto out; - /* - * kthread_create failed. undo all the stuff from above again. 
- */ - unregister_oom_notifier(&cmm_oom_nb); + if (rc) + goto out_kthread; + return 0; +out_kthread: + unregister_pm_notifier(&cmm_power_notifier); +out_pm: + unregister_oom_notifier(&cmm_oom_nb); out_oom_notify: #ifdef CONFIG_CMM_IUCV smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target); @@ -447,8 +485,8 @@ out_smsg: #endif #ifdef CONFIG_CMM_PROC unregister_sysctl_table(cmm_sysctl_header); +out_sysctl: #endif -out: return rc; } @@ -456,6 +494,7 @@ static void cmm_exit(void) { kthread_stop(cmm_thread_ptr); + unregister_pm_notifier(&cmm_power_notifier); unregister_oom_notifier(&cmm_oom_nb); cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list); cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list); -- cgit v1.2.1 From c20c89a692872a4090b1004e6554fdc282baf72f Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 7 Dec 2009 12:52:07 +0100 Subject: [S390] smp: remove unused typedef and defines Remove unused typedef, defines, update copyright, remove unneeded includes, remove unneeded ifdefs. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/smp.h | 54 +++++++++------------------------------------ 1 file changed, 10 insertions(+), 44 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index a868b272c257..2ab1141eeb50 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -1,57 +1,22 @@ /* - * include/asm-s390/smp.h - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), - * Martin Schwidefsky (schwidefsky@de.ibm.com) - * Heiko Carstens (heiko.carstens@de.ibm.com) + * Copyright IBM Corp. 1999,2009 + * Author(s): Denis Joseph Barrow, + * Martin Schwidefsky , + * Heiko Carstens , */ #ifndef __ASM_SMP_H #define __ASM_SMP_H -#include -#include -#include +#ifdef CONFIG_SMP -#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__) - -#include -#include -#include #include - -/* - s390 specific smp.c headers - */ -typedef struct -{ - int intresting; - sigp_ccode ccode; - __u32 status; - __u16 cpu; -} sigp_info; +#include extern void machine_restart_smp(char *); extern void machine_halt_smp(void); extern void machine_power_off_smp(void); -#define NO_PROC_ID 0xFF /* No processor magic marker */ - -/* - * This magic constant controls our willingness to transfer - * a process across CPUs. Such a transfer incurs misses on the L1 - * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My - * gut feeling is this will vary by board in value. For a board - * with separate L2 cache it probably depends also on the RSS, and - * for a board with shared L2 cache it ought to decay fast as other - * processes are run. 
- */ - -#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ - #define raw_smp_processor_id() (S390_lowcore.cpu_nr) -#define cpu_logical_map(cpu) (cpu) extern int __cpu_disable (void); extern void __cpu_die (unsigned int cpu); @@ -64,7 +29,9 @@ extern int smp_cpu_polarization[]; extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -#endif +extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; + +#endif /* CONFIG_SMP */ #ifdef CONFIG_HOTPLUG_CPU extern int smp_rescan_cpus(void); @@ -72,5 +39,4 @@ extern int smp_rescan_cpus(void); static inline int smp_rescan_cpus(void) { return 0; } #endif -extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; -#endif +#endif /* __ASM_SMP_H */ -- cgit v1.2.1 From df27d7baf6fadfc7a2a5e57e7ea43010dc819096 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 7 Dec 2009 12:52:08 +0100 Subject: [S390] use generic termbits.h header file A compare shows that arch/s390/include/asm/termbits.h is the same as include/asm-generic.h. This patch lets arch/s390/include/asm/termbits.h include the generic header. Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/termbits.h | 206 +-------------------------------------- 1 file changed, 3 insertions(+), 203 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/termbits.h b/arch/s390/include/asm/termbits.h index 58731853d529..71bf6ac6a2b9 100644 --- a/arch/s390/include/asm/termbits.h +++ b/arch/s390/include/asm/termbits.h @@ -1,206 +1,6 @@ -/* - * include/asm-s390/termbits.h - * - * S390 version - * - * Derived from "include/asm-i386/termbits.h" - */ +#ifndef _ASM_S390_TERMBITS_H +#define _ASM_S390_TERMBITS_H -#ifndef __ARCH_S390_TERMBITS_H__ -#define __ARCH_S390_TERMBITS_H__ - -#include - -typedef unsigned char cc_t; -typedef unsigned int speed_t; -typedef unsigned int tcflag_t; - -#define NCCS 19 -struct termios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ -}; - -struct termios2 { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -struct ktermios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -/* c_cc characters */ -#define VINTR 0 -#define VQUIT 1 -#define VERASE 2 -#define VKILL 3 -#define VEOF 4 -#define VTIME 5 -#define VMIN 6 -#define VSWTC 7 -#define VSTART 8 -#define VSTOP 9 -#define VSUSP 10 -#define VEOL 11 -#define VREPRINT 12 -#define VDISCARD 13 -#define VWERASE 14 -#define VLNEXT 15 -#define VEOL2 16 - -/* c_iflag bits */ -#define IGNBRK 0000001 -#define BRKINT 0000002 -#define IGNPAR 0000004 -#define PARMRK 0000010 -#define INPCK 0000020 -#define ISTRIP 0000040 -#define INLCR 0000100 -#define IGNCR 0000200 -#define ICRNL 0000400 -#define IUCLC 0001000 -#define IXON 
0002000 -#define IXANY 0004000 -#define IXOFF 0010000 -#define IMAXBEL 0020000 -#define IUTF8 0040000 - -/* c_oflag bits */ -#define OPOST 0000001 -#define OLCUC 0000002 -#define ONLCR 0000004 -#define OCRNL 0000010 -#define ONOCR 0000020 -#define ONLRET 0000040 -#define OFILL 0000100 -#define OFDEL 0000200 -#define NLDLY 0000400 -#define NL0 0000000 -#define NL1 0000400 -#define CRDLY 0003000 -#define CR0 0000000 -#define CR1 0001000 -#define CR2 0002000 -#define CR3 0003000 -#define TABDLY 0014000 -#define TAB0 0000000 -#define TAB1 0004000 -#define TAB2 0010000 -#define TAB3 0014000 -#define XTABS 0014000 -#define BSDLY 0020000 -#define BS0 0000000 -#define BS1 0020000 -#define VTDLY 0040000 -#define VT0 0000000 -#define VT1 0040000 -#define FFDLY 0100000 -#define FF0 0000000 -#define FF1 0100000 - -/* c_cflag bit meaning */ -#define CBAUD 0010017 -#define B0 0000000 /* hang up */ -#define B50 0000001 -#define B75 0000002 -#define B110 0000003 -#define B134 0000004 -#define B150 0000005 -#define B200 0000006 -#define B300 0000007 -#define B600 0000010 -#define B1200 0000011 -#define B1800 0000012 -#define B2400 0000013 -#define B4800 0000014 -#define B9600 0000015 -#define B19200 0000016 -#define B38400 0000017 -#define EXTA B19200 -#define EXTB B38400 -#define CSIZE 0000060 -#define CS5 0000000 -#define CS6 0000020 -#define CS7 0000040 -#define CS8 0000060 -#define CSTOPB 0000100 -#define CREAD 0000200 -#define PARENB 0000400 -#define PARODD 0001000 -#define HUPCL 0002000 -#define CLOCAL 0004000 -#define CBAUDEX 0010000 -#define BOTHER 0010000 -#define B57600 0010001 -#define B115200 0010002 -#define B230400 0010003 -#define B460800 0010004 -#define B500000 0010005 -#define B576000 0010006 -#define B921600 0010007 -#define B1000000 0010010 -#define B1152000 0010011 -#define B1500000 0010012 -#define B2000000 0010013 -#define B2500000 0010014 -#define B3000000 0010015 -#define B3500000 0010016 -#define B4000000 0010017 -#define CIBAUD 002003600000 /* input baud rate */ -#define CMSPAR 010000000000 /* mark or space (stick) parity */ -#define CRTSCTS 020000000000 /* flow control */ - -#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ - -/* c_lflag bits */ -#define ISIG 0000001 -#define ICANON 0000002 -#define XCASE 0000004 -#define ECHO 0000010 -#define ECHOE 0000020 -#define ECHOK 0000040 -#define ECHONL 0000100 -#define NOFLSH 0000200 -#define TOSTOP 0000400 -#define ECHOCTL 0001000 -#define ECHOPRT 0002000 -#define ECHOKE 0004000 -#define FLUSHO 0010000 -#define PENDIN 0040000 -#define IEXTEN 0100000 - -/* tcflow() and TCXONC use these */ -#define TCOOFF 0 -#define TCOON 1 -#define TCIOFF 2 -#define TCION 3 - -/* tcflush() and TCFLSH use these */ -#define TCIFLUSH 0 -#define TCOFLUSH 1 -#define TCIOFLUSH 2 - -/* tcsetattr uses these */ -#define TCSANOW 0 -#define TCSADRAIN 1 -#define TCSAFLUSH 2 +#include #endif -- cgit v1.2.1 From 2f319a5249830392090896cd794fabf9d6d4a4ed Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 7 Dec 2009 12:52:09 +0100 Subject: [S390] use generic sockios.h header file A compare shows that arch/s390/include/asm/sockios.h is the same as include/asm-generic/sockios.h. This patch lets arch/s390/include/asm/ termbits.h include the generic header. 
Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/sockios.h | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/sockios.h b/arch/s390/include/asm/sockios.h index f4fc16c7da59..6f60eee73242 100644 --- a/arch/s390/include/asm/sockios.h +++ b/arch/s390/include/asm/sockios.h @@ -1,21 +1,6 @@ -/* - * include/asm-s390/sockios.h - * - * S390 version - * - * Derived from "include/asm-i386/sockios.h" - */ +#ifndef _ASM_S390_SOCKIOS_H +#define _ASM_S390_SOCKIOS_H -#ifndef __ARCH_S390_SOCKIOS__ -#define __ARCH_S390_SOCKIOS__ - -/* Socket-level I/O control calls. */ -#define FIOSETOWN 0x8901 -#define SIOCSPGRP 0x8902 -#define FIOGETOWN 0x8903 -#define SIOCGPGRP 0x8904 -#define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ -#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ +#include #endif -- cgit v1.2.1 From 6a985c6194017de2c062916ad1cd00dee0302c40 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 7 Dec 2009 12:52:11 +0100 Subject: [S390] s390: use change recording override for kernel mapping We dont need the dirty bit if a write access is done via the kernel mapping. In that case SetPageDirty and friends are used anyway, no need to do that a second time. We can use the change-recording overide function for the kernel mapping, if available. Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pgtable.h | 4 +++- arch/s390/mm/vmem.c | 11 ++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 60a7b1a1702f..e2fa79cf0614 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -169,12 +169,13 @@ extern unsigned long VMALLOC_START; * STL Segment-Table-Length: Segment-table length (STL+1*16 entries -> up to 2048) * * A 64 bit pagetable entry of S390 has following format: - * | PFRA |0IP0| OS | + * | PFRA |0IPC| OS | * 0000000000111111111122222222223333333333444444444455555555556666 * 0123456789012345678901234567890123456789012345678901234567890123 * * I Page-Invalid Bit: Page is not available for address-translation * P Page-Protection Bit: Store access not possible for page + * C Change-bit override: HW is not required to set change bit * * A 64 bit segmenttable entry of S390 has following format: * | P-table origin | TT @@ -218,6 +219,7 @@ extern unsigned long VMALLOC_START; */ /* Hardware bits in the page table entry */ +#define _PAGE_CO 0x100 /* HW Change-bit override */ #define _PAGE_RO 0x200 /* HW read-only bit */ #define _PAGE_INVALID 0x400 /* HW invalid bit */ diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 5f91a38d7592..300ab012b0fd 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -70,8 +70,12 @@ static pte_t __ref *vmem_pte_alloc(void) pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); if (!pte) return NULL; - clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, - PTRS_PER_PTE * sizeof(pte_t)); + if (MACHINE_HAS_HPAGE) + clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO, + PTRS_PER_PTE * sizeof(pte_t)); + else + clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, + PTRS_PER_PTE * sizeof(pte_t)); return pte; } @@ -112,7 +116,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) && (address + HPAGE_SIZE <= start + size) && 
(address >= HPAGE_SIZE)) { - pte_val(pte) |= _SEGMENT_ENTRY_LARGE; + pte_val(pte) |= _SEGMENT_ENTRY_LARGE | + _SEGMENT_ENTRY_CO; pmd_val(*pm_dir) = pte_val(pte); address += HPAGE_SIZE - PAGE_SIZE; continue; -- cgit v1.2.1 From bd119ee29b447c521d66f6a5095fd3533d845da7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 7 Dec 2009 12:52:14 +0100 Subject: [S390] etr/stp: put correct per cpu variable Fix this compile error in linux-next: arch/s390/kernel/time.c: In function 'get_sync_clock': arch/s390/kernel/time.c:337: error: 'clock_sync_sync' undeclared (first use in this function) Gets exposed because the new per cpu code references the variable passed to put_cpu_var. This was not a real bug. Reported-by: Sachin Sant Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/time.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 34162a0b2caa..75cdc0d02415 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -334,7 +334,7 @@ int get_sync_clock(unsigned long long *clock) sw0 = atomic_read(sw_ptr); *clock = get_clock(); sw1 = atomic_read(sw_ptr); - put_cpu_var(clock_sync_sync); + put_cpu_var(clock_sync_word); if (sw0 == sw1 && (sw0 & 0x80000000U)) /* Success: time is in sync. */ return 0; @@ -384,7 +384,7 @@ static inline int check_sync_clock(void) sw_ptr = &get_cpu_var(clock_sync_word); rc = (atomic_read(sw_ptr) & 0x80000000U) != 0; - put_cpu_var(clock_sync_sync); + put_cpu_var(clock_sync_word); return rc; } -- cgit v1.2.1 From b0694685bc892c60009335e61af1f058f06ccf64 Mon Sep 17 00:00:00 2001 From: Aristeu Rozanski Date: Mon, 7 Dec 2009 12:52:15 +0100 Subject: [S390] ftrace: build ftrace.o when CONFIG_FTRACE_SYSCALLS is set for s390 Trying to build a s390x kernel with CONFIG_FTRACE_SYSCALLS will fail because ftrace.o is not built/linked. Acked-by: Frederic Weisbecker Signed-off-by: Aristeu Rozanski Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index c7be8e10b87e..683f6381cc59 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o) obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o +obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o # Kexec part S390_KEXEC_OBJS := machine_kexec.o crash.o -- cgit v1.2.1 From 2da3cf9755ea992a18650860fde74e3bfa8c8b65 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Mon, 7 Dec 2009 12:52:16 +0100 Subject: [S390] s390: remove unused nfsd #includes Some unused includes removed. This patch is in an effort to cleanup nfsd headers and move private definitions to source directory. 
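As background for the clock_sync fix a few patches above: get_cpu_var() and put_cpu_var() must name the same per-CPU variable, because the reworked per-CPU code evaluates the argument passed to put_cpu_var(). A minimal sketch of the pairing, modelled on the check_sync_clock() helper in that diff (the function name here is illustrative):

#include <linux/percpu.h>
#include <asm/atomic.h>

/* per-CPU word holding the clock sync state, as in arch/s390/kernel/time.c */
static DEFINE_PER_CPU(atomic_t, clock_sync_word);

static int clock_in_sync(void)
{
	atomic_t *sw_ptr;
	int rc;

	sw_ptr = &get_cpu_var(clock_sync_word);	/* disables preemption */
	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
	put_cpu_var(clock_sync_word);		/* same variable, re-enables preemption */
	return rc;
}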
Signed-off-by: Boaz Harrosh Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/compat_linux.c | 6 ------ arch/s390/kernel/compat_linux.h | 4 ---- 2 files changed, 10 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 0debcec23a39..29a2f34b9555 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -31,14 +31,8 @@ #include #include #include -#include #include #include -#include -#include -#include -#include -#include #include #include #include diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h index c07f9ca05ade..81d6ee826ff3 100644 --- a/arch/s390/kernel/compat_linux.h +++ b/arch/s390/kernel/compat_linux.h @@ -4,10 +4,6 @@ #include #include #include -#include -#include -#include -#include /* Macro that masks the high order bit of an 32 bit pointer and converts it*/ /* to a 64 bit pointer */ -- cgit v1.2.1 From 570dcf2c15463842e384eb597a87c1e39bead99b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 30 Nov 2009 16:52:36 -0500 Subject: Kill ancient crap in s390 compat mmap We've had TASK_SIZE set to 1<<31 for 31bit tasks since May 2004. Before that old32_mmap() had to deal with do_mmap_pgoff() giving it an address out of range. It had tried to do that by checking return value and doing do_munmap() (at wrong address, BTW). IOW, that code had been dead for 5.5 years (and bogus - for 8). Kill. Acked-by: Hugh Dickins Signed-off-by: Al Viro --- arch/s390/kernel/compat_linux.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 25c31d681402..11556aa6bf17 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -642,11 +642,6 @@ static inline long do_mmap2( down_write(¤t->mm->mmap_sem); error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) { - /* Result is out of bounds. */ - do_munmap(current->mm, addr, len); - error = -ENOMEM; - } up_write(¤t->mm->mmap_sem); if (file) -- cgit v1.2.1 From f8b7256096a20436f6d0926747e3ac3d64c81d24 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 30 Nov 2009 17:37:04 -0500 Subject: Unify sys_mmap* New helper - sys_mmap_pgoff(); switch syscalls to using it. Acked-by: David S. 
Miller Signed-off-by: Al Viro --- arch/s390/kernel/compat_linux.c | 32 +++----------------------------- arch/s390/kernel/sys_s390.c | 30 ++---------------------------- 2 files changed, 5 insertions(+), 57 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 11556aa6bf17..22c9e557bb22 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -624,33 +624,6 @@ struct mmap_arg_struct_emu31 { u32 offset; }; -/* common code for old and new mmaps */ -static inline long do_mmap2( - unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - struct file * file = NULL; - unsigned long error = -EBADF; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - - asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg) { @@ -664,7 +637,8 @@ old32_mmap(struct mmap_arg_struct_emu31 __user *arg) if (a.offset & ~PAGE_MASK) goto out; - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, + a.offset >> PAGE_SHIFT); out: return error; } @@ -677,7 +651,7 @@ sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) if (copy_from_user(&a, arg, sizeof(a))) goto out; - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); out: return error; } diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index e9d94f61d500..86a74c9c9e63 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -32,32 +32,6 @@ #include #include "entry.h" -/* common code for old and new mmaps */ -static inline long do_mmap2( - unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - long error = -EBADF; - struct file * file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - /* * Perform the select(nd, in, out, ex, tv) and mmap() system * calls. 
Linux for S/390 isn't able to handle more than 5 @@ -81,7 +55,7 @@ SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg) if (copy_from_user(&a, arg, sizeof(a))) goto out; - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); out: return error; } @@ -98,7 +72,7 @@ SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg) if (a.offset & ~PAGE_MASK) goto out; - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); out: return error; } -- cgit v1.2.1 From 559df2e0210352f83926d178c40c51142292a18c Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Apr 2009 22:35:10 +0200 Subject: kbuild: move asm-offsets.h to include/generated The simplest method was to add an extra asm-offsets.h file in arch/$ARCH/include/asm that references the generated file. We can now migrate the architectures one-by-one to reference the generated file direct - and when done we can delete the temporary arch/$ARCH/include/asm/asm-offsets.h file. Signed-off-by: Sam Ravnborg Cc: Al Viro Signed-off-by: Michal Marek --- arch/s390/include/asm/asm-offsets.h | 1 + 1 file changed, 1 insertion(+) create mode 100644 arch/s390/include/asm/asm-offsets.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/asm-offsets.h b/arch/s390/include/asm/asm-offsets.h new file mode 100644 index 000000000000..d370ee36a182 --- /dev/null +++ b/arch/s390/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include -- cgit v1.2.1 From 445c89514be242b1b0080056d50bdc1b72adeb5c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 2 Dec 2009 19:49:50 +0100 Subject: locking: Convert raw_spinlock to arch_spinlock The raw_spin* namespace was taken by lockdep for the architecture specific implementations. raw_spin_* would be the ideal name space for the spinlocks which are not converted to sleeping locks in preempt-rt. Linus suggested to convert the raw_ to arch_ locks and cleanup the name space instead of using an artifical name like core_spin, atomic_spin or whatever No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. 
Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/s390/include/asm/spinlock.h | 16 ++++++++-------- arch/s390/include/asm/spinlock_types.h | 2 +- arch/s390/lib/spinlock.c | 8 ++++---- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index c9af0d19c7ab..6121fa4b83d9 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -57,12 +57,12 @@ _raw_compare_and_swap(volatile unsigned int *lock, do { while (__raw_spin_is_locked(lock)) \ _raw_spin_relax(lock); } while (0) -extern void _raw_spin_lock_wait(raw_spinlock_t *); -extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags); -extern int _raw_spin_trylock_retry(raw_spinlock_t *); -extern void _raw_spin_relax(raw_spinlock_t *lock); +extern void _raw_spin_lock_wait(arch_spinlock_t *); +extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); +extern int _raw_spin_trylock_retry(arch_spinlock_t *); +extern void _raw_spin_relax(arch_spinlock_t *lock); -static inline void __raw_spin_lock(raw_spinlock_t *lp) +static inline void __raw_spin_lock(arch_spinlock_t *lp) { int old; @@ -72,7 +72,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lp) _raw_spin_lock_wait(lp); } -static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, +static inline void __raw_spin_lock_flags(arch_spinlock_t *lp, unsigned long flags) { int old; @@ -83,7 +83,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, _raw_spin_lock_wait_flags(lp, flags); } -static inline int __raw_spin_trylock(raw_spinlock_t *lp) +static inline int __raw_spin_trylock(arch_spinlock_t *lp) { int old; @@ -93,7 +93,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lp) return _raw_spin_trylock_retry(lp); } -static inline void __raw_spin_unlock(raw_spinlock_t *lp) +static inline void __raw_spin_unlock(arch_spinlock_t *lp) { _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); } diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index 654abc40de04..a93638eee3f7 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int owner_cpu; -} __attribute__ ((aligned (4))) raw_spinlock_t; +} __attribute__ ((aligned (4))) arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index f7e0d30250b7..d4cbf71a6077 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu) _raw_yield(); } -void _raw_spin_lock_wait(raw_spinlock_t *lp) +void _raw_spin_lock_wait(arch_spinlock_t *lp) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -59,7 +59,7 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp) } EXPORT_SYMBOL(_raw_spin_lock_wait); -void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) +void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -82,7 +82,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) } EXPORT_SYMBOL(_raw_spin_lock_wait_flags); -int _raw_spin_trylock_retry(raw_spinlock_t *lp) +int _raw_spin_trylock_retry(arch_spinlock_t *lp) { unsigned int cpu = ~smp_processor_id(); int count; @@ -97,7 +97,7 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp) } 
EXPORT_SYMBOL(_raw_spin_trylock_retry); -void _raw_spin_relax(raw_spinlock_t *lock) +void _raw_spin_relax(arch_spinlock_t *lock) { unsigned int cpu = lock->owner_cpu; if (cpu != 0) -- cgit v1.2.1 From edc35bd72e2079b25f99c5da7d7a65dbbffc4a26 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 12:38:57 +0100 Subject: locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED Further name space cleanup. No functional change Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/s390/include/asm/spinlock_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index a93638eee3f7..e25c0370f6cd 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int owner_cpu; } __attribute__ ((aligned (4))) arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -- cgit v1.2.1 From 0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 2 Dec 2009 20:01:25 +0100 Subject: locking: Convert __raw_spin* functions to arch_spin* Name space cleanup. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/s390/include/asm/spinlock.h | 34 +++++++++++++++++----------------- arch/s390/lib/spinlock.c | 22 +++++++++++----------- 2 files changed, 28 insertions(+), 28 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 6121fa4b83d9..a94c146657a9 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock, * (the type definitions are in asm/spinlock_types.h) */ -#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) \ - _raw_spin_relax(lock); } while (0) +#define arch_spin_is_locked(x) ((x)->owner_cpu != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) \ + arch_spin_relax(lock); } while (0) -extern void _raw_spin_lock_wait(arch_spinlock_t *); -extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); -extern int _raw_spin_trylock_retry(arch_spinlock_t *); -extern void _raw_spin_relax(arch_spinlock_t *lock); +extern void arch_spin_lock_wait(arch_spinlock_t *); +extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); +extern int arch_spin_trylock_retry(arch_spinlock_t *); +extern void arch_spin_relax(arch_spinlock_t *lock); -static inline void __raw_spin_lock(arch_spinlock_t *lp) +static inline void arch_spin_lock(arch_spinlock_t *lp) { int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return; - _raw_spin_lock_wait(lp); + arch_spin_lock_wait(lp); } -static inline void __raw_spin_lock_flags(arch_spinlock_t *lp, +static inline void arch_spin_lock_flags(arch_spinlock_t *lp, unsigned long flags) { int old; @@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *lp, old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return; - 
_raw_spin_lock_wait_flags(lp, flags); + arch_spin_lock_wait_flags(lp, flags); } -static inline int __raw_spin_trylock(arch_spinlock_t *lp) +static inline int arch_spin_trylock(arch_spinlock_t *lp) { int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return 1; - return _raw_spin_trylock_retry(lp); + return arch_spin_trylock_retry(lp); } -static inline void __raw_spin_unlock(arch_spinlock_t *lp) +static inline void arch_spin_unlock(arch_spinlock_t *lp) { _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); } @@ -188,7 +188,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return _raw_write_trylock_retry(rw); } -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index d4cbf71a6077..f4596452f072 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu) _raw_yield(); } -void _raw_spin_lock_wait(arch_spinlock_t *lp) +void arch_spin_lock_wait(arch_spinlock_t *lp) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -51,15 +51,15 @@ void _raw_spin_lock_wait(arch_spinlock_t *lp) _raw_yield_cpu(~owner); count = spin_retry; } - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return; } } -EXPORT_SYMBOL(_raw_spin_lock_wait); +EXPORT_SYMBOL(arch_spin_lock_wait); -void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) +void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) _raw_yield_cpu(~owner); count = spin_retry; } - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; local_irq_disable(); if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) @@ -80,30 +80,30 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) local_irq_restore(flags); } } -EXPORT_SYMBOL(_raw_spin_lock_wait_flags); +EXPORT_SYMBOL(arch_spin_lock_wait_flags); -int _raw_spin_trylock_retry(arch_spinlock_t *lp) +int arch_spin_trylock_retry(arch_spinlock_t *lp) { unsigned int cpu = ~smp_processor_id(); int count; for (count = spin_retry; count > 0; count--) { - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return 1; } return 0; } -EXPORT_SYMBOL(_raw_spin_trylock_retry); +EXPORT_SYMBOL(arch_spin_trylock_retry); -void _raw_spin_relax(arch_spinlock_t *lock) +void arch_spin_relax(arch_spinlock_t *lock) { unsigned int cpu = lock->owner_cpu; if (cpu != 0) _raw_yield_cpu(~cpu); } -EXPORT_SYMBOL(_raw_spin_relax); +EXPORT_SYMBOL(arch_spin_relax); void _raw_read_lock_wait(raw_rwlock_t *rw) { -- cgit v1.2.1 From fb3a6bbc912b12347614e5742c7c61416cdb0ca0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 20:01:19 +0100 Subject: locking: Convert raw_rwlock to arch_rwlock Not strictly necessary for -rt as -rt does not have non sleeping rwlocks, but it's odd to not have a consistent naming convention. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. 
Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/s390/include/asm/spinlock.h | 30 +++++++++++++++--------------- arch/s390/include/asm/spinlock_types.h | 4 ++-- arch/s390/lib/spinlock.c | 12 ++++++------ 3 files changed, 23 insertions(+), 23 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index a94c146657a9..7f98f0e48acb 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -121,14 +121,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) */ #define __raw_write_can_lock(x) ((x)->lock == 0) -extern void _raw_read_lock_wait(raw_rwlock_t *lp); -extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); -extern int _raw_read_trylock_retry(raw_rwlock_t *lp); -extern void _raw_write_lock_wait(raw_rwlock_t *lp); -extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); -extern int _raw_write_trylock_retry(raw_rwlock_t *lp); - -static inline void __raw_read_lock(raw_rwlock_t *rw) +extern void _raw_read_lock_wait(arch_rwlock_t *lp); +extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); +extern int _raw_read_trylock_retry(arch_rwlock_t *lp); +extern void _raw_write_lock_wait(arch_rwlock_t *lp); +extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); +extern int _raw_write_trylock_retry(arch_rwlock_t *lp); + +static inline void __raw_read_lock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) _raw_read_lock_wait(rw); } -static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) +static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) _raw_read_lock_wait_flags(rw, flags); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { unsigned int old, cmp; @@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) } while (cmp != old); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait(rw); } -static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags) +static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait_flags(rw, flags); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { _raw_compare_and_swap(&rw->lock, 0x80000000, 0); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -181,7 +181,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return _raw_read_trylock_retry(rw); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) return 1; diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index e25c0370f6cd..9c76656a0af0 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ 
b/arch/s390/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index f4596452f072..09fee9a1aa15 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -105,7 +105,7 @@ void arch_spin_relax(arch_spinlock_t *lock) } EXPORT_SYMBOL(arch_spin_relax); -void _raw_read_lock_wait(raw_rwlock_t *rw) +void _raw_read_lock_wait(arch_rwlock_t *rw) { unsigned int old; int count = spin_retry; @@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_lock_wait); -void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; int count = spin_retry; @@ -145,7 +145,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) } EXPORT_SYMBOL(_raw_read_lock_wait_flags); -int _raw_read_trylock_retry(raw_rwlock_t *rw) +int _raw_read_trylock_retry(arch_rwlock_t *rw) { unsigned int old; int count = spin_retry; @@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_trylock_retry); -void _raw_write_lock_wait(raw_rwlock_t *rw) +void _raw_write_lock_wait(arch_rwlock_t *rw) { int count = spin_retry; @@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_write_lock_wait); -void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) { int count = spin_retry; @@ -197,7 +197,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) } EXPORT_SYMBOL(_raw_write_lock_wait_flags); -int _raw_write_trylock_retry(raw_rwlock_t *rw) +int _raw_write_trylock_retry(arch_rwlock_t *rw) { int count = spin_retry; -- cgit v1.2.1 From e5931943d02bf751b1ec849c0d2ade23d76a8d41 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 20:08:46 +0100 Subject: locking: Convert raw_rwlock functions to arch_rwlock Name space cleanup for rwlock functions. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/s390/include/asm/spinlock.h | 20 ++++++++++---------- arch/s390/lib/spinlock.c | 12 ++++++------ 2 files changed, 16 insertions(+), 16 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 7f98f0e48acb..a587907d77f3 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -113,13 +113,13 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((int)(x)->lock >= 0) +#define arch_read_can_lock(x) ((int)(x)->lock >= 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
*/ -#define __raw_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) ((x)->lock == 0) extern void _raw_read_lock_wait(arch_rwlock_t *lp); extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); @@ -128,7 +128,7 @@ extern void _raw_write_lock_wait(arch_rwlock_t *lp); extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); extern int _raw_write_trylock_retry(arch_rwlock_t *lp); -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -136,7 +136,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) _raw_read_lock_wait(rw); } -static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) +static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) _raw_read_lock_wait_flags(rw, flags); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int old, cmp; @@ -155,24 +155,24 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) } while (cmp != old); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait(rw); } -static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) +static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait_flags(rw, flags); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { _raw_compare_and_swap(&rw->lock, 0x80000000, 0); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -181,7 +181,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) return _raw_read_trylock_retry(rw); } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) return 1; diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 09fee9a1aa15..10754a375668 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -115,7 +115,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw) _raw_yield(); count = spin_retry; } - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) @@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) _raw_yield(); count = spin_retry; } - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; local_irq_disable(); @@ -151,7 +151,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw) int count = spin_retry; while (count-- > 0) { - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) @@ -170,7 +170,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw) _raw_yield(); count = spin_retry; } - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; if 
(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) return; @@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) _raw_yield(); count = spin_retry; } - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; local_irq_disable(); if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) @@ -202,7 +202,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw) int count = spin_retry; while (count-- > 0) { - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) return 1; -- cgit v1.2.1 From 471452104b8520337ae2fb48c4e61cd4896e025d Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 14 Dec 2009 18:00:08 -0800 Subject: const: constify remaining dev_pm_ops Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/appldata/appldata_base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 495589950dc7..5c91995b74e4 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -551,7 +551,7 @@ static int appldata_thaw(struct device *dev) return appldata_restore(dev); } -static struct dev_pm_ops appldata_pm_ops = { +static const struct dev_pm_ops appldata_pm_ops = { .freeze = appldata_freeze, .thaw = appldata_thaw, .restore = appldata_restore, -- cgit v1.2.1 From e7d2860b690d4f3bed6824757c540579638e3d1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Goddard=20Rosa?= Date: Mon, 14 Dec 2009 18:01:06 -0800 Subject: tree-wide: convert open calls to remove spaces to skip_spaces() lib function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Makes use of skip_spaces() defined in lib/string.c for removing leading spaces from strings all over the tree. It decreases lib.a code size by 47 bytes and reuses the function tree-wide: text data bss dec hex filename 64688 584 592 65864 10148 (TOTALS-BEFORE) 64641 584 592 65817 10119 (TOTALS-AFTER) Also, while at it, if we see (*str && isspace(*str)), we can be sure to remove the first condition (*str) as the second one (isspace(*str)) also evaluates to 0 whenever *str == 0, making it redundant. In other words, "a char equals zero is never a space". Julia Lawall tried the semantic patch (http://coccinelle.lip6.fr) below, and found occurrences of this pattern on 3 more files: drivers/leds/led-class.c drivers/leds/ledtrig-timer.c drivers/video/output.c @@ expression str; @@ ( // ignore skip_spaces cases while (*str && isspace(*str)) { \(str++;\|++str;\) } | - *str && isspace(*str) ) Signed-off-by: AndrĂ© Goddard Rosa Cc: Julia Lawall Cc: Martin Schwidefsky Cc: Jeff Dike Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. 
Peter Anvin" Cc: Richard Purdie Cc: Neil Brown Cc: Kyle McMartin Cc: Henrique de Moraes Holschuh Cc: David Howells Cc: Cc: Samuel Ortiz Cc: Patrick McHardy Cc: Takashi Iwai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/kernel/debug.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 071c81f179ef..0168472b2fdf 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -1178,7 +1179,7 @@ debug_get_uint(char *buf) { int rc; - for(; isspace(*buf); buf++); + buf = skip_spaces(buf); rc = simple_strtoul(buf, &buf, 10); if(*buf){ rc = -EINVAL; -- cgit v1.2.1 From 698ba7b5a3a7be772922340fade365c675b8243f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 15 Dec 2009 16:47:37 -0800 Subject: elf: kill USE_ELF_CORE_DUMP Currently all architectures but microblaze unconditionally define USE_ELF_CORE_DUMP. The microblaze omission seems like an error to me, so let's kill this ifdef and make sure we are the same everywhere. Signed-off-by: Christoph Hellwig Acked-by: Hugh Dickins Cc: Cc: Michal Simek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/include/asm/elf.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index e885442c1dfe..354d42616c7e 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -155,7 +155,6 @@ extern unsigned int vdso_enabled; } while (0) #define CORE_DUMP_USE_REGSET -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. 
Typical -- cgit v1.2.1 From 70ee9518cfc8baec618e69e4ef22566dcb2f29d3 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 18 Dec 2009 17:43:14 +0100 Subject: [S390] wire up sys_recvmmsg Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/unistd.h | 3 ++- arch/s390/kernel/compat_wrapper.S | 9 +++++++++ arch/s390/kernel/syscalls.S | 1 + 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index cb5232df151e..192a7203a14f 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -269,7 +269,8 @@ #define __NR_pwritev 329 #define __NR_rt_tgsigqueueinfo 330 #define __NR_perf_event_open 331 -#define NR_syscalls 332 +#define __NR_recvmmsg 332 +#define NR_syscalls 333 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 30de2d0e52bb..faeaccc7d7d9 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1853,3 +1853,12 @@ sys32_execve_wrapper: llgtr %r3,%r3 # compat_uptr_t * llgtr %r4,%r4 # compat_uptr_t * jg sys32_execve # branch to system call + + .globl compat_sys_recvmmsg_wrapper +compat_sys_recvmmsg_wrapper: + lgfr %r2,%r2 # int + llgtr %r3,%r3 # struct compat_mmsghdr * + llgfr %r4,%r4 # unsigned int + llgfr %r5,%r5 # unsigned int + llgtr %r6,%r6 # struct compat_timespec * + jg compat_sys_recvmmsg diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 30eca070d426..4f292c936872 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -340,3 +340,4 @@ SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper) +SYSCALL(sys_recvmmsg,sys_recvmmsg,compat_sys_recvmmsg_wrapper) -- cgit v1.2.1 From b59cdcb339fc7286161b80403f6af63acf26876f Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Fri, 18 Dec 2009 17:43:18 +0100 Subject: [S390] s390: PTR_ERR return of wrong pointer in fallback_init_cip() Return the PTR_ERR of the correct pointer. Signed-off-by: Roel Kluin Signed-off-by: Martin Schwidefsky --- arch/s390/crypto/aes_s390.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 6118890c946d..6be4503201ac 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -174,7 +174,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm) if (IS_ERR(sctx->fallback.cip)) { pr_err("Allocating AES fallback algorithm %s failed\n", name); - return PTR_ERR(sctx->fallback.blk); + return PTR_ERR(sctx->fallback.cip); } return 0; -- cgit v1.2.1 From 622e99bf0d54c4517cb0524540cd77257db8621a Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 18 Dec 2009 17:43:20 +0100 Subject: [S390] rename NT_PRXSTATUS to NT_S390_HIGHREGS The elf notes number for the upper register halves is s390 specific. Change the name of the elf notes to include S390. 
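As a side note on the PTR_ERR fix two patches above: when an allocation returns its error encoded in the pointer, the error code has to be extracted from the pointer that was actually tested. A reduced sketch of the corrected pattern; the structure and helper names are illustrative, not the driver's exact code:

#include <linux/err.h>
#include <linux/crypto.h>

struct s390_aes_fallback {		/* illustrative container */
	struct crypto_cipher *cip;
	struct crypto_blkcipher *blk;
};

static int fallback_cip_alloc(struct s390_aes_fallback *fb, const char *name)
{
	fb->cip = crypto_alloc_cipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fb->cip))
		return PTR_ERR(fb->cip);	/* was PTR_ERR(fb->blk): wrong pointer */
	return 0;
}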
Signed-off-by: Martin Schwidefsky Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/ptrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 653c6a178740..13815d39f7dd 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -959,7 +959,7 @@ static const struct user_regset s390_compat_regsets[] = { .set = s390_fpregs_set, }, [REGSET_GENERAL_EXTENDED] = { - .core_note_type = NT_PRXSTATUS, + .core_note_type = NT_S390_HIGH_GPRS, .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), .size = sizeof(compat_long_t), .align = sizeof(compat_long_t), -- cgit v1.2.1 From ca633fd006486ed2c2d3b542283067aab61e6dc8 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 18 Dec 2009 17:43:24 +0100 Subject: [S390] ptrace: dont abuse PT_PTRACED Nobody except ptrace itself should use task->ptrace or PT_PTRACED directly, change arch/s390/kernel/traps.c to use the helper. Signed-off-by: Oleg Nesterov Acked-by: Roland McGrath Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/traps.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index c2e42cc65ce7..6e7ad63854c0 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include @@ -382,7 +382,7 @@ void __kprobes do_single_step(struct pt_regs *regs) SIGTRAP) == NOTIFY_STOP){ return; } - if ((current->ptrace & PT_PTRACED) != 0) + if (tracehook_consider_fatal_signal(current, SIGTRAP)) force_sig(SIGTRAP, current); } @@ -483,7 +483,7 @@ static void illegal_op(struct pt_regs * regs, long interruption_code) if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) return; if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { - if (current->ptrace & PT_PTRACED) + if (tracehook_consider_fatal_signal(current, SIGTRAP)) force_sig(SIGTRAP, current); else signal = SIGILL; -- cgit v1.2.1 From 1d802e24774c94ec7bdb12b6515226f3341533c1 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 18 Dec 2009 17:43:27 +0100 Subject: [S390] Use strim instead of strstrip to avoid false warnings. 
Cc: Michael Holzheu Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/hypfs/hypfs_diag.c | 4 ++-- arch/s390/hypfs/hypfs_vm.c | 2 +- arch/s390/kernel/ipl.c | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index 77df726180ba..2b92d501425f 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -164,7 +164,7 @@ static inline void part_hdr__part_name(enum diag204_format type, void *hdr, LPAR_NAME_LEN); EBCASC(name, LPAR_NAME_LEN); name[LPAR_NAME_LEN] = 0; - strstrip(name); + strim(name); } struct cpu_info { @@ -523,7 +523,7 @@ static int diag224_idx2name(int index, char *name) memcpy(name, diag224_cpu_names + ((index + 1) * CPU_NAME_LEN), CPU_NAME_LEN); name[CPU_NAME_LEN] = 0; - strstrip(name); + strim(name); return 0; } diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index d01fc8f799f0..f0b0d31f0b48 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -124,7 +124,7 @@ static int hpyfs_vm_create_guest(struct super_block *sb, /* guest dir */ memcpy(guest_name, data->guest_name, NAME_LEN); EBCASC(guest_name, NAME_LEN); - strstrip(guest_name); + strim(guest_name); guest_dir = hypfs_mkdir(sb, systems_dir, guest_name); if (IS_ERR(guest_dir)) return PTR_ERR(guest_dir); diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 4890ac6d7faa..4d73296fed74 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -221,7 +221,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ const char *buf, size_t len) \ { \ strncpy(_value, buf, sizeof(_value) - 1); \ - strstrip(_value); \ + strim(_value); \ return len; \ } \ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ @@ -472,7 +472,7 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, return sprintf(page, "#unknown#\n"); memcpy(loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN); EBCASC(loadparm, LOADPARM_LEN); - strstrip(loadparm); + strim(loadparm); return sprintf(page, "%s\n", loadparm); } @@ -776,7 +776,7 @@ static void reipl_get_ascii_loadparm(char *loadparm, memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN); EBCASC(loadparm, LOADPARM_LEN); loadparm[LOADPARM_LEN] = 0; - strstrip(loadparm); + strim(loadparm); } static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb, -- cgit v1.2.1 From f5cae7b0fb6927981a839443dfa6d73c30415cba Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Mon, 4 Jan 2010 09:05:43 +0100 Subject: [S390] Have param.h simply include . Since the files have identical content, might as well simplify. Signed-off-by: Robert P. J. Day Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/param.h | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/param.h b/arch/s390/include/asm/param.h index 34aaa4603347..c616821bf2ac 100644 --- a/arch/s390/include/asm/param.h +++ b/arch/s390/include/asm/param.h @@ -1,30 +1,6 @@ -/* - * include/asm-s390/param.h - * - * S390 version - * - * Derived from "include/asm-i386/param.h" - */ - #ifndef _ASMS390_PARAM_H #define _ASMS390_PARAM_H -#ifdef __KERNEL__ -# define HZ CONFIG_HZ /* Internal kernel timer frequency */ -# define USER_HZ 100 /* .. 
some user interfaces are in "ticks" */ -# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ -#endif - -#ifndef HZ -#define HZ 100 -#endif - -#define EXEC_PAGESIZE 4096 - -#ifndef NOGROUP -#define NOGROUP (-1) -#endif - -#define MAXHOSTNAMELEN 64 /* max length of hostname */ +#include -#endif +#endif /* _ASMS390_PARAM_H */ -- cgit v1.2.1 From cc4707b31198d27baae650db1721a2f05701db37 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 4 Jan 2010 09:05:44 +0100 Subject: [S390] Update default configuration. Signed-off-by: Martin Schwidefsky --- arch/s390/defconfig | 86 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 75 insertions(+), 11 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/defconfig b/arch/s390/defconfig index f4e53c6708dc..b416aa11b91e 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.31 -# Tue Sep 22 17:43:13 2009 +# Linux kernel version: 2.6.33-rc2 +# Mon Jan 4 09:03:07 2010 # CONFIG_SCHED_MC=y CONFIG_MMU=y @@ -51,6 +51,7 @@ CONFIG_AUDIT=y # CONFIG_TREE_RCU=y # CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set # CONFIG_RCU_TRACE is not set CONFIG_RCU_FANOUT=64 # CONFIG_RCU_FANOUT_EXACT is not set @@ -113,7 +114,6 @@ CONFIG_HAVE_PERF_EVENTS=y # CONFIG_PERF_EVENTS is not set # CONFIG_PERF_COUNTERS is not set CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y # CONFIG_SLUB is not set @@ -149,21 +149,78 @@ CONFIG_STOP_MACHINE=y CONFIG_BLOCK=y CONFIG_BLK_DEV_BSG=y # CONFIG_BLK_DEV_INTEGRITY is not set +# CONFIG_BLK_CGROUP is not set CONFIG_BLOCK_COMPAT=y # # IO Schedulers # CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_AS=y CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y -# CONFIG_DEFAULT_AS is not set +# CONFIG_CFQ_GROUP_IOSCHED is not set CONFIG_DEFAULT_DEADLINE=y # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="deadline" CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_TRYLOCK=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_TRYLOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set 
+# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set CONFIG_FREEZER=y # @@ -227,14 +284,13 @@ CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG_SPARSE=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_SPLIT_PTLOCK_CPUS=999999 CONFIG_MIGRATION=y CONFIG_PHYS_ADDR_T_64BIT=y CONFIG_ZONE_DMA_FLAG=1 CONFIG_BOUNCE=y CONFIG_VIRT_TO_BUS=y -CONFIG_HAVE_MLOCK=y -CONFIG_HAVE_MLOCKED_PAGE_BIT=y +# CONFIG_KSM is not set CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 # @@ -339,6 +395,7 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y CONFIG_INET6_XFRM_MODE_BEET=y # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set CONFIG_IPV6_NDISC_NODETYPE=y # CONFIG_IPV6_TUNNEL is not set # CONFIG_IPV6_MULTIPLE_TABLES is not set @@ -504,6 +561,10 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_COW_COMMON is not set CONFIG_BLK_DEV_LOOP=m # CONFIG_BLK_DEV_CRYPTOLOOP is not set + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# CONFIG_BLK_DEV_NBD=m # CONFIG_BLK_DEV_OSD is not set CONFIG_BLK_DEV_RAM=y @@ -710,7 +771,6 @@ CONFIG_S390_VMUR=m # CONFIG_PPS is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set # CONFIG_WATCHDOG is not set # CONFIG_REGULATOR is not set # CONFIG_MEMSTICK is not set @@ -864,6 +924,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=2048 CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set @@ -931,7 +992,6 @@ CONFIG_BRANCH_PROFILE_NONE=y CONFIG_SAMPLES=y # CONFIG_SAMPLE_KOBJECT is not set # CONFIG_SAMPLE_KPROBES is not set -# CONFIG_KMEMCHECK is not set # # Security options @@ -939,7 +999,11 @@ CONFIG_SAMPLES=y # CONFIG_KEYS is not set # CONFIG_SECURITY is not set # CONFIG_SECURITYFS is not set -# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_SMACK is not set +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" CONFIG_CRYPTO=y # -- cgit v1.2.1 From c3311c13adc1021e986fef12609ceb395ffc5014 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 13 Jan 2010 20:44:25 +0100 Subject: [S390] fix loading of PER control registers for utrace. If the current task enables / disables PER tracing for itself the PER control registers need to be loaded in FixPerRegisters. 
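In plain form, the check added below compares the control registers the CPU currently holds against the values just computed and reloads CRs 9-11 only when they differ. A consolidated sketch of that logic, assuming the 2009-era s390 __ctl_store()/__ctl_load() macros and the per_cr_words/per_struct types used in the hunk (the helper name is mine):

#include <linux/sched.h>
#include <linux/string.h>
#include <asm/ptrace.h>
#include <asm/system.h>

static void load_per_cregs_if_changed(struct task_struct *task)
{
	per_struct *per_info = (per_struct *) &task->thread.per_info;
	per_cr_words cr_words;

	if (task != current)
		return;			/* other tasks pick the values up on their next dispatch */

	__ctl_store(cr_words, 9, 11);	/* CRs 9-11 as currently loaded */
	if (memcmp(&cr_words, &per_info->control_regs.words,
		   sizeof(cr_words)) != 0)
		__ctl_load(per_info->control_regs.words, 9, 11);
}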
Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/ptrace.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 13815d39f7dd..7cf464234419 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -65,6 +65,7 @@ FixPerRegisters(struct task_struct *task) { struct pt_regs *regs; per_struct *per_info; + per_cr_words cr_words; regs = task_pt_regs(task); per_info = (per_struct *) &task->thread.per_info; @@ -98,6 +99,13 @@ FixPerRegisters(struct task_struct *task) per_info->control_regs.bits.storage_alt_space_ctl = 1; else per_info->control_regs.bits.storage_alt_space_ctl = 0; + + if (task == current) { + __ctl_store(cr_words, 9, 11); + if (memcmp(&cr_words, &per_info->control_regs.words, + sizeof(cr_words)) != 0) + __ctl_load(per_info->control_regs.words, 9, 11); + } } void user_enable_single_step(struct task_struct *task) -- cgit v1.2.1 From f8d5faf718c9ff2c04eb8484585d4963c4111cd7 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 13 Jan 2010 20:44:26 +0100 Subject: [S390] clear TIF_SINGLE_STEP for new process. Clear the TIF_SINGLE_STEP bit in copy_thread. The new process did not get a PER event of its own. It is wrong deliver a SIGTRAP that was meant for the parent process. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/process.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 5417eb57271a..b98fe8e6e507 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -217,6 +217,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, p->thread.mm_segment = get_fs(); /* Don't copy debug registers */ memset(&p->thread.per_info, 0, sizeof(p->thread.per_info)); + clear_tsk_thread_flag(p, TIF_SINGLE_STEP); /* Initialize per thread user and system timer values */ ti = task_thread_info(p); ti->user_timer = 0; -- cgit v1.2.1 From 6f50248ef0efa7453397eb53e41e8aa5df534492 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 13 Jan 2010 20:44:27 +0100 Subject: [S390] duplicate SIGTRAP on signal delivery. The code in do_signal sets the TIF_SINGLE_STEP bit and calls tracehook_signal_handler after the signal frame has been set up. This causes two SIGTRAP signals to be delivered to the tracer. Stop setting the TIF_SINGLE_STEP bit in do_signal to get the correct number of SIGTRAPs. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/signal.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 6b4fef877f9d..1675c48b9145 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -499,19 +499,11 @@ void do_signal(struct pt_regs *regs) if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); - /* - * If we would have taken a single-step trap - * for a normal instruction, act like we took - * one for the handler setup. - */ - if (current->thread.per_info.single_step) - set_thread_flag(TIF_SINGLE_STEP); - /* * Let tracing know that we've done the handler setup. 
*/ tracehook_signal_handler(signr, &info, &ka, regs, - test_thread_flag(TIF_SINGLE_STEP)); + current->thread.per_info.single_step); } return; } -- cgit v1.2.1 From bebf023d415fd8984994a596aaa83cd0a3046d0b Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 13 Jan 2010 20:44:28 +0100 Subject: [S390] remove superfluous TIF_USEDFPU bit The TIF_USEDFPU bit is always 0 for s390 and it is not tested anywhere. Remove the bit. At the same time remove the calls to clear_used_math() as well. The PF_USED_MATH bit is never set for s390 either. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/thread_info.h | 12 +++++------- arch/s390/kernel/process.c | 2 -- arch/s390/kernel/setup.c | 6 ------ 3 files changed, 5 insertions(+), 15 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 07eb61b2fb3a..66069e736842 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -93,13 +93,12 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ #define TIF_SECCOMP 10 /* secure computing */ #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ -#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ -#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling +#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ -#define TIF_31BIT 18 /* 32bit process */ -#define TIF_MEMDIE 19 -#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ -#define TIF_FREEZE 21 /* thread is freezing for suspend */ +#define TIF_31BIT 17 /* 32bit process */ +#define TIF_MEMDIE 18 +#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */ +#define TIF_FREEZE 20 /* thread is freezing for suspend */ #define _TIF_NOTIFY_RESUME (1<active_mm = &init_mm; BUG_ON(current->mm); -- cgit v1.2.1 From a58c26bba9ebe97fea99aee125728b1f3886499e Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Wed, 13 Jan 2010 20:44:33 +0100 Subject: [S390] use helpers for rlimits Make sure compiler won't do weird things with limits. E.g. fetching them twice may return 2 different values after writable limits are implemented. I.e. either use rlimit helpers added in 3e10e716abf3c71bdb5d86b8f507f9e72236c9cd or ACCESS_ONCE if not applicable. Cc: Heiko Carstens Cc: linux390@de.ibm.com Cc: linux-s390@vger.kernel.org Signed-off-by: Jiri Slaby Signed-off-by: Martin Schwidefsky --- arch/s390/mm/mmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index f4558ccf02b9..869efbaed3ea 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -40,7 +40,7 @@ static inline unsigned long mmap_base(void) { - unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur; + unsigned long gap = rlimit(RLIMIT_STACK); if (gap < MIN_GAP) gap = MIN_GAP; @@ -61,7 +61,7 @@ static inline int mmap_is_legacy(void) #endif return sysctl_legacy_va_layout || (current->personality & ADDR_COMPAT_LAYOUT) || - current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY; + rlimit(RLIMIT_STACK) == RLIM_INFINITY; } #ifndef CONFIG_64BIT -- cgit v1.2.1 From 02beaccc901b7a28ac1de79f3ed122f5fda220b1 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 13 Jan 2010 20:44:34 +0100 Subject: [S390] smp: setup smp_processor_id early smp_processor_id() is supposed to work before setup_arch() gets called. 
Before that smp_processor_id() may return just an arbitrary value that is contained in the uninitialized boot lowcore. So provide the arch function which will override the weak function in init/main.c. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/setup.c | 1 - arch/s390/kernel/smp.c | 6 ++++++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 2148ad3d490d..2d6a265b0145 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -849,7 +849,6 @@ setup_arch(char **cmdline_p) setup_lowcore(); cpu_init(); - __cpu_logical_map[0] = stap(); s390_init_cpu_topology(); /* diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 93e52039321b..eebce7fdc97c 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -717,6 +717,12 @@ void __init smp_cpus_done(unsigned int max_cpus) { } +void __init smp_setup_processor_id(void) +{ + S390_lowcore.cpu_nr = 0; + __cpu_logical_map[0] = stap(); +} + /* * the frequency of the profiling timer can be changed * by writing a multiplier value into /proc/profile. -- cgit v1.2.1 From c6a5f8cea2e5454fce3859ca5ed381c2535184cf Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 13 Jan 2010 20:44:35 +0100 Subject: [S390] smp: remove volatile type qualifier from __cpu_logical_map Remove pointless qualifier. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/sigp.h | 2 +- arch/s390/kernel/setup.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h index ec403d4304f8..0cc620a6a2cc 100644 --- a/arch/s390/include/asm/sigp.h +++ b/arch/s390/include/asm/sigp.h @@ -19,7 +19,7 @@ #include /* get real cpu address from logical cpu number */ -extern volatile int __cpu_logical_map[]; +extern int __cpu_logical_map[]; typedef enum { diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 2d6a265b0145..3fe1680c3899 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -87,7 +87,7 @@ unsigned long elf_hwcap = 0; char elf_platform[ELF_PLATFORM_SIZE]; struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; -volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ +int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ int __initdata memory_end_set; unsigned long __initdata memory_end; -- cgit v1.2.1 From fb380aadfe34e8d3ce628cb3e386882351940874 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 13 Jan 2010 20:44:37 +0100 Subject: [S390] Move __cpu_logical_map to smp.c Finally move it to the place where it belongs and get rid of it for !CONFIG_SMP.
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/sigp.h | 18 +++++++++++++----- arch/s390/kernel/setup.c | 1 - arch/s390/kernel/smp.c | 3 +++ arch/s390/kernel/topology.c | 2 +- arch/s390/lib/spinlock.c | 2 +- 5 files changed, 18 insertions(+), 8 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h index 0cc620a6a2cc..f72d611f7e13 100644 --- a/arch/s390/include/asm/sigp.h +++ b/arch/s390/include/asm/sigp.h @@ -15,12 +15,20 @@ #ifndef __SIGP__ #define __SIGP__ -#include -#include +#include /* get real cpu address from logical cpu number */ extern int __cpu_logical_map[]; +static inline int cpu_logical_map(int cpu) +{ +#ifdef CONFIG_SMP + return __cpu_logical_map[cpu]; +#else + return stap(); +#endif +} + typedef enum { sigp_unassigned=0x0, @@ -79,7 +87,7 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code) " ipm %0\n" " srl %0,28\n" : "=d" (ccode) - : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]), + : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), "a" (order_code) : "cc" , "memory"); return ccode; } @@ -98,7 +106,7 @@ signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code) " ipm %0\n" " srl %0,28\n" : "=d" (ccode) - : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]), + : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), "a" (order_code) : "cc" , "memory"); return ccode; } @@ -118,7 +126,7 @@ signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr, " ipm %0\n" " srl %0,28\n" : "=d" (ccode), "+d" (reg1) - : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code) + : "d" (cpu_logical_map(cpu_addr)), "a" (order_code) : "cc" , "memory"); *statusptr = reg1; return ccode; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 3fe1680c3899..8d8957b38ab3 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -87,7 +87,6 @@ unsigned long elf_hwcap = 0; char elf_platform[ELF_PLATFORM_SIZE]; struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; -int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ int __initdata memory_end_set; unsigned long __initdata memory_end; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index eebce7fdc97c..76a6fdd46c45 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -52,6 +52,9 @@ #include #include "entry.h" +/* logical cpu to cpu address */ +int __cpu_logical_map[NR_CPUS]; + static struct task_struct *current_set[NR_CPUS]; static u8 smp_cpu_type; diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 3c72c9cf22b6..14ef6f05e432 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -114,7 +114,7 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin; for_each_present_cpu(lcpu) { - if (__cpu_logical_map[lcpu] == rcpu) { + if (cpu_logical_map(lcpu) == rcpu) { cpu_set(lcpu, core->mask); smp_cpu_polarization[lcpu] = tl_cpu->pp; } diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 10754a375668..cff327f109a8 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -34,7 +34,7 @@ static inline void _raw_yield_cpu(int cpu) { if (MACHINE_HAS_DIAG9C) asm volatile("diag %0,0,0x9c" - : : "d" (__cpu_logical_map[cpu])); + : : "d" (cpu_logical_map(cpu))); else _raw_yield(); } -- cgit v1.2.1 From a9df8e325d0de527c2e97297704ebbec48c01cbf Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 13 Jan 2010 20:44:38 +0100 Subject: [S390] bug: 
implement arch specific __WARN macro This one will trap, generate shorter code and emit better debug data than the generic version. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/bug.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h index efb74fd5156e..b1066b9fb5f8 100644 --- a/arch/s390/include/asm/bug.h +++ b/arch/s390/include/asm/bug.h @@ -52,6 +52,10 @@ unreachable(); \ } while (0) +#define __WARN() do { \ + __EMIT_BUG(BUGFLAG_WARNING); \ +} while (0) + #define WARN_ON(x) ({ \ int __ret_warn_on = !!(x); \ if (__builtin_constant_p(__ret_warn_on)) { \ -- cgit v1.2.1 From d381589834aa69f51f95b1e364fe79688692aab4 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 13 Jan 2010 20:44:39 +0100 Subject: [S390] mmap: add missing compat_ptr conversion to both mmap compat syscalls Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/compat_linux.c | 41 ++++++++++++++++------------------------- 1 file changed, 16 insertions(+), 25 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 22c9e557bb22..11c3aba664ea 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -616,44 +616,35 @@ asmlinkage long sys32_fstatat64(unsigned int dfd, char __user *filename, */ struct mmap_arg_struct_emu31 { - u32 addr; - u32 len; - u32 prot; - u32 flags; - u32 fd; - u32 offset; + compat_ulong_t addr; + compat_ulong_t len; + compat_ulong_t prot; + compat_ulong_t flags; + compat_ulong_t fd; + compat_ulong_t offset; }; -asmlinkage unsigned long -old32_mmap(struct mmap_arg_struct_emu31 __user *arg) +asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg) { struct mmap_arg_struct_emu31 a; - int error = -EFAULT; if (copy_from_user(&a, arg, sizeof(a))) - goto out; - - error = -EINVAL; + return -EFAULT; if (a.offset & ~PAGE_MASK) - goto out; - - error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, - a.offset >> PAGE_SHIFT); -out: - return error; + return -EINVAL; + a.addr = (unsigned long) compat_ptr(a.addr); + return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, + a.offset >> PAGE_SHIFT); } -asmlinkage long -sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) +asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) { struct mmap_arg_struct_emu31 a; - int error = -EFAULT; if (copy_from_user(&a, arg, sizeof(a))) - goto out; - error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); -out: - return error; + return -EFAULT; + a.addr = (unsigned long) compat_ptr(a.addr); + return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); } asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count) -- cgit v1.2.1 From 94e587f61ef5da3b4da40289cdb7e9a62d455313 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 13 Jan 2010 20:44:42 +0100 Subject: [S390] unwire sys_recvmmsg again sys_recvmmsg is reachable via sys_socketcall. So unwire it again since there is no point in having two entry points for it. Also put it on the ignore list so we don't get reminded to wire it up anymore.
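For context, a minimal userspace sketch of how recvmmsg() stays reachable through the multiplexed socketcall entry point once the dedicated syscall slot is unwired. This is illustrative only and not part of the patch; it assumes an architecture that provides __NR_socketcall (such as s390) and kernel headers that define SYS_RECVMMSG in <linux/net.h> and struct mmsghdr.

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/socket.h>
#include <linux/net.h>
#include <unistd.h>
#include <time.h>

/* Illustrative sketch: reach recvmmsg via the socketcall multiplexer. */
static int recvmmsg_via_socketcall(int fd, struct mmsghdr *msgvec,
				   unsigned int vlen, unsigned int flags,
				   struct timespec *timeout)
{
	unsigned long args[5] = {
		(unsigned long) fd, (unsigned long) msgvec,
		(unsigned long) vlen, (unsigned long) flags,
		(unsigned long) timeout
	};

	/* sys_socketcall demultiplexes on the first argument (SYS_RECVMMSG). */
	return syscall(__NR_socketcall, SYS_RECVMMSG, args);
}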
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/unistd.h | 6 ++++-- arch/s390/kernel/compat_wrapper.S | 9 --------- arch/s390/kernel/syscalls.S | 1 - 3 files changed, 4 insertions(+), 12 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 192a7203a14f..6e9f049fa823 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -269,8 +269,7 @@ #define __NR_pwritev 329 #define __NR_rt_tgsigqueueinfo 330 #define __NR_perf_event_open 331 -#define __NR_recvmmsg 332 -#define NR_syscalls 333 +#define NR_syscalls 332 /* * There are some system calls that are not present on 64 bit, some @@ -377,6 +376,9 @@ #define __IGNORE_migrate_pages #define __IGNORE_move_pages +/* Ignore system calls that are also reachable via sys_socket */ +#define __IGNORE_recvmmsg + #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_ALARM diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index faeaccc7d7d9..30de2d0e52bb 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1853,12 +1853,3 @@ sys32_execve_wrapper: llgtr %r3,%r3 # compat_uptr_t * llgtr %r4,%r4 # compat_uptr_t * jg sys32_execve # branch to system call - - .globl compat_sys_recvmmsg_wrapper -compat_sys_recvmmsg_wrapper: - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct compat_mmsghdr * - llgfr %r4,%r4 # unsigned int - llgfr %r5,%r5 # unsigned int - llgtr %r6,%r6 # struct compat_timespec * - jg compat_sys_recvmmsg diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 4f292c936872..30eca070d426 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -340,4 +340,3 @@ SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper) -SYSCALL(sys_recvmmsg,sys_recvmmsg,compat_sys_recvmmsg_wrapper) -- cgit v1.2.1 From 062d5e9b0d714f449b261bb522eadaaf6f00f438 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 21 Jan 2010 12:19:07 +0100 Subject: KVM: S390: fix potential array overrun in intercept handling kvm_handle_sie_intercept uses a jump table to get the intercept handler for a SIE intercept. Static code analysis revealed a potential problem: the intercept_funcs jump table was defined to contain (0x48 >> 2) entries, but we only checked for code > 0x48 which would cause an off-by-one array overflow if code == 0x48. Use the compiler and ARRAY_SIZE to automatically set the limits. 
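To make the off-by-one concrete: a table sized (0x48 >> 2) has 18 entries with valid indices 0..17, yet a check that only rejects code > 0x48 still accepts code == 0x48, which indexes entry 18, one past the end. A reduced, self-contained sketch of the ARRAY_SIZE-based bound the patch switches to (the names here are hypothetical, not the kvm code itself):

#include <stddef.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef int (*handler_t)(void);

/* 18 slots for intercept codes 0x00, 0x04, ..., 0x44. */
static const handler_t handlers[0x48 >> 2] = { NULL };

static handler_t lookup(unsigned char code)
{
	/*
	 * Reject unaligned codes and anything outside the table, including
	 * code == 0x48, which the old "code > 0x48" test let through.
	 */
	if (code & 3 || (code >> 2) >= ARRAY_SIZE(handlers))
		return NULL;
	return handlers[code >> 2];
}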
Cc: stable@kernel.org Signed-off-by: Christian Borntraeger Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/intercept.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index ba9d8a7bc1ac..b40096494e46 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -213,7 +213,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) return rc2; } -static const intercept_handler_t intercept_funcs[0x48 >> 2] = { +static const intercept_handler_t intercept_funcs[] = { [0x00 >> 2] = handle_noop, [0x04 >> 2] = handle_instruction, [0x08 >> 2] = handle_prog, @@ -230,7 +230,7 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) intercept_handler_t func; u8 code = vcpu->arch.sie_block->icptcode; - if (code & 3 || code > 0x48) + if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) return -ENOTSUPP; func = intercept_funcs[code >> 2]; if (func) -- cgit v1.2.1 From 428aecf67cf673d546627b2813bd4acabd20e3a9 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 27 Jan 2010 10:12:36 +0100 Subject: [S390] irqflags: add missing types.h include Add missing types.h include. Otherwise it would cause build breakage for hw breakpoint support, because of undefined BITS_PER_LONG. Also fix up the copyright line and remove the superfluous __KERNEL__ ifdef. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/irqflags.h | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h index 3f26131120b7..c2fb432f576a 100644 --- a/arch/s390/include/asm/irqflags.h +++ b/arch/s390/include/asm/irqflags.h @@ -1,14 +1,12 @@ /* - * include/asm-s390/irqflags.h - * - * Copyright (C) IBM Corp. 2006 - * Author(s): Heiko Carstens + * Copyright IBM Corp. 2006,2010 + * Author(s): Martin Schwidefsky */ #ifndef __ASM_IRQFLAGS_H #define __ASM_IRQFLAGS_H -#ifdef __KERNEL__ +#include <linux/types.h> #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) @@ -102,5 +100,4 @@ static inline int raw_irqs_disabled_flags(unsigned long flags) /* For spinlocks etc */ #define raw_local_irq_save(x) ((x) = raw_local_irq_disable()) -#endif /* __KERNEL__ */ #endif /* __ASM_IRQFLAGS_H */ -- cgit v1.2.1 From 0b4d78903bf48fe6b125c4c9f0755437a4f21d47 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 27 Jan 2010 10:12:37 +0100 Subject: [S390] use set_current_state in sigsuspend Use set_current_state instead of a direct assignment to set the task state of the current process. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 1675c48b9145..6289945562b0 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -64,7 +64,7 @@ SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask) recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); schedule(); set_thread_flag(TIF_RESTORE_SIGMASK); -- cgit v1.2.1 From 21ec7f6dbf10492ce9a21718040677d3e68bd57d Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 27 Jan 2010 10:12:40 +0100 Subject: [S390] fix single stepped svcs with TRACE_IRQFLAGS=y If irq flags tracing is enabled the TRACE_IRQS_ON macro expands to a function call which clobbers registers %r0-%r5.
The macro is used in the code path for single stepped system calls. The argument registers %r2-%r6 need to be restored from the stack before the system call function is called. Cc: stable@kernel.org Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/entry.S | 1 + arch/s390/kernel/entry64.S | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 48215d15762b..e8ef21c51bbe 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -571,6 +571,7 @@ pgm_svcper: mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP TRACE_IRQS_ON + lm %r2,%r6,SP_R2(%r15) # load svc arguments stosm __SF_EMPTY(%r15),0x03 # reenable interrupts b BASED(sysc_do_svc) diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 9aff1d449b6e..f33658f09dd7 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -549,6 +549,7 @@ pgm_svcper: mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP TRACE_IRQS_ON + lmg %r2,%r6,SP_R2(%r15) # load svc arguments stosm __SF_EMPTY(%r15),0x03 # reenable interrupts j sysc_do_svc -- cgit v1.2.1 From 7717aefff3290c61e5f9e6aa39e9e1dc63cd4e81 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 9 Feb 2010 09:46:09 +0100 Subject: [S390] Fix struct _lowcore layout. Offsets and sizes are wrong for 32 bit. Got broken with 866ba284 "[S390] cleanup lowcore.h". Reported-by: Mahesh Salgaonkar Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/lowcore.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index f2ef4b619ce1..c25dfac7dd76 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -293,12 +293,12 @@ struct _lowcore __u64 clock_comparator; /* 0x02d0 */ __u32 machine_flags; /* 0x02d8 */ __u32 ftrace_func; /* 0x02dc */ - __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */ + __u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */ /* Interrupt response block */ __u8 irb[64]; /* 0x0300 */ - __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ + __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */ /* * 0xe00 contains the address of the IPL Parameter Information -- cgit v1.2.1