Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile                |   4
-rw-r--r--  arch/s390/kernel/debug.c                 |   8
-rw-r--r--  arch/s390/kernel/dis.c                   |   1
-rw-r--r--  arch/s390/kernel/early.c                 |  75
-rw-r--r--  arch/s390/kernel/entry.S                 |  18
-rw-r--r--  arch/s390/kernel/entry64.S               |   6
-rw-r--r--  arch/s390/kernel/ftrace.c                |  36
-rw-r--r--  arch/s390/kernel/head.S                  |   1
-rw-r--r--  arch/s390/kernel/head31.S                |   1
-rw-r--r--  arch/s390/kernel/head64.S                |   9
-rw-r--r--  arch/s390/kernel/ipl.c                   | 181
-rw-r--r--  arch/s390/kernel/mcount.S                | 147
-rw-r--r--  arch/s390/kernel/mcount64.S              |  78
-rw-r--r--  arch/s390/kernel/ptrace.c                |  12
-rw-r--r--  arch/s390/kernel/setup.c                 |  21
-rw-r--r--  arch/s390/kernel/signal.c                |   2
-rw-r--r--  arch/s390/kernel/smp.c                   |  46
-rw-r--r--  arch/s390/kernel/suspend.c               |  73
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S          | 184
-rw-r--r--  arch/s390/kernel/time.c                  |  42
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S  |  11
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S           |  96
22 files changed, 690 insertions(+), 362 deletions(-)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index c75ed43b1a18..c7be8e10b87e 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -32,7 +32,7 @@ extra-y += head.o init_task.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o topology.o
-
+obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
@@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index be8bceaf37d9..4c512561687d 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -63,8 +63,6 @@ typedef struct
} debug_sprintf_entry_t;
-extern void tod_to_timeval(uint64_t todval, struct timespec *xtime);
-
/* internal function prototyes */
static int debug_init(void);
@@ -1450,17 +1448,13 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
int area, debug_entry_t * entry, char *out_buf)
{
struct timespec time_spec;
- unsigned long long time;
char *except_str;
unsigned long caller;
int rc = 0;
unsigned int level;
level = entry->id.fields.level;
- time = entry->id.stck;
- /* adjust todclock to 1970 */
- time -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
- tod_to_timeval(time, &time_spec);
+ stck_to_timespec(entry->id.stck, &time_spec);
if (entry->id.fields.exception)
except_str = "*";
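
The debug.c hunk above drops the open-coded TOD epoch adjustment in favour of a stck_to_timespec() helper. As a rough, standalone C sketch of what such a conversion does (the epoch constant is copied from the removed lines, and the tod_to_timeval() body mirrors the version visible in the time.c hunk further down; this is an illustration, not the kernel implementation):

/*
 * Hedged sketch only: the epoch offset comes from the removed debug.c
 * lines, tod_to_timeval_sketch() mirrors the body shown in the time.c hunk.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TOD_UNIX_EPOCH_SKETCH \
	(0x8126d60e46000000ULL - (0x3c26700ULL * 1000000 * 4096))

static void tod_to_timeval_sketch(uint64_t todval, struct timespec *ts)
{
	uint64_t sec = (todval >> 12) / 1000000; /* TOD units: 4096 per microsecond */

	ts->tv_sec = sec;
	todval -= (sec * 1000000) << 12;
	ts->tv_nsec = (todval * 1000) >> 12;	/* leftover microseconds -> ns */
}

static void stck_to_timespec_sketch(uint64_t stck, struct timespec *ts)
{
	tod_to_timeval_sketch(stck - TOD_UNIX_EPOCH_SKETCH, ts);
}

int main(void)
{
	struct timespec ts;

	/* 1000 seconds after the Unix epoch, expressed in TOD units */
	stck_to_timespec_sketch(TOD_UNIX_EPOCH_SKETCH + 4096ULL * 1000000 * 1000, &ts);
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec); /* 1000.000000000 */
	return 0;
}
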
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index d2f270c995d9..db943a7ec513 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -15,7 +15,6 @@
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index f9b144049dc9..bf8b4ae7ff2d 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -6,6 +6,9 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
+#define KMSG_COMPONENT "setup"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -16,6 +19,7 @@
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
+#include <linux/kernel.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
@@ -35,8 +39,6 @@
char kernel_nss_name[NSS_NAME_SIZE + 1];
-static unsigned long machine_flags;
-
static void __init setup_boot_command_line(void);
/*
@@ -81,6 +83,8 @@ asm(
" br 14\n"
" .size savesys_ipl_nss, .-savesys_ipl_nss\n");
+static __initdata char upper_command_line[COMMAND_LINE_SIZE];
+
static noinline __init void create_kernel_nss(void)
{
unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
@@ -90,7 +94,6 @@ static noinline __init void create_kernel_nss(void)
int response;
size_t len;
char *savesys_ptr;
- char upper_command_line[COMMAND_LINE_SIZE];
char defsys_cmd[DEFSYS_CMD_SIZE];
char savesys_cmd[SAVESYS_CMD_SIZE];
@@ -141,6 +144,8 @@ static noinline __init void create_kernel_nss(void)
__cpcmd(defsys_cmd, NULL, 0, &response);
if (response != 0) {
+ pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
+ response);
kernel_nss_name[0] = '\0';
return;
}
@@ -153,8 +158,11 @@ static noinline __init void create_kernel_nss(void)
* max SAVESYS_CMD_SIZE
* On error: response contains the numeric portion of cp error message.
* for SAVESYS it will be >= 263
+ * for missing privilege class, it will be 1
*/
- if (response > SAVESYS_CMD_SIZE) {
+ if (response > SAVESYS_CMD_SIZE || response == 1) {
+ pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
+ response);
kernel_nss_name[0] = '\0';
return;
}
@@ -205,12 +213,12 @@ static noinline __init void detect_machine_type(void)
/* Running under KVM? If not we assume z/VM */
if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3))
- machine_flags |= MACHINE_FLAG_KVM;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
else
- machine_flags |= MACHINE_FLAG_VM;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
-static void early_pgm_check_handler(void)
+static __init void early_pgm_check_handler(void)
{
unsigned long addr;
const struct exception_table_entry *fixup;
@@ -222,7 +230,7 @@ static void early_pgm_check_handler(void)
S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}
-void setup_lowcore_early(void)
+static noinline __init void setup_lowcore_early(void)
{
psw_t psw;
@@ -242,7 +250,7 @@ static noinline __init void setup_hpage(void)
facilities = stfl();
if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29)))
return;
- machine_flags |= MACHINE_FLAG_HPAGE;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
__ctl_set_bit(0, 23);
#endif
}
@@ -260,7 +268,7 @@ static __init void detect_mvpg(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
if (!rc)
- machine_flags |= MACHINE_FLAG_MVPG;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
#endif
}
@@ -276,7 +284,7 @@ static __init void detect_ieee(void)
EX_TABLE(0b,1b)
: "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
if (!rc)
- machine_flags |= MACHINE_FLAG_IEEE;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
#endif
}
@@ -295,7 +303,7 @@ static __init void detect_csp(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
if (!rc)
- machine_flags |= MACHINE_FLAG_CSP;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
#endif
}
@@ -312,7 +320,7 @@ static __init void detect_diag9c(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
if (!rc)
- machine_flags |= MACHINE_FLAG_DIAG9C;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}
static __init void detect_diag44(void)
@@ -327,7 +335,7 @@ static __init void detect_diag44(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
if (!rc)
- machine_flags |= MACHINE_FLAG_DIAG44;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
#endif
}
@@ -338,11 +346,11 @@ static __init void detect_machine_facilities(void)
facilities = stfl();
if (facilities & (1 << 28))
- machine_flags |= MACHINE_FLAG_IDTE;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (facilities & (1 << 23))
- machine_flags |= MACHINE_FLAG_PFMF;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
if (facilities & (1 << 4))
- machine_flags |= MACHINE_FLAG_MVCOS;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
#endif
}
@@ -364,21 +372,35 @@ static __init void rescue_initrd(void)
}
/* Set up boot command line */
-static void __init setup_boot_command_line(void)
+static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
- char *parm = NULL;
+ char *parm, *delim;
+ size_t rc, len;
+ len = strlen(boot_command_line);
+
+ delim = boot_command_line + len; /* '\0' character position */
+ parm = boot_command_line + len + 1; /* append right after '\0' */
+
+ rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
+ if (rc) {
+ if (*parm == '=')
+ memmove(boot_command_line, parm + 1, rc);
+ else
+ *delim = ' '; /* replace '\0' with space */
+ }
+}
+
+static void __init setup_boot_command_line(void)
+{
/* copy arch command line */
strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
/* append IPL PARM data to the boot command line */
- if (MACHINE_IS_VM) {
- parm = boot_command_line + strlen(boot_command_line);
- *parm++ = ' ';
- get_ipl_vmparm(parm);
- if (parm[0] == '=')
- memmove(boot_command_line, parm + 1, strlen(parm));
- }
+ if (MACHINE_IS_VM)
+ append_to_cmdline(append_ipl_vmparm);
+
+ append_to_cmdline(append_ipl_scpdata);
}
@@ -410,7 +432,6 @@ void __init startup_init(void)
setup_hpage();
sclp_facilities_detect();
detect_memory_layout(memory_chunk);
- S390_lowcore.machine_flags = machine_flags;
#ifdef CONFIG_DYNAMIC_FTRACE
S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
#endif
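
The command-line handling above is reworked so that both the z/VM IPL PARM and the new SCP data are appended through one helper that takes a callback. A minimal user-space sketch of the append_to_cmdline() contract, with a hypothetical fake_ipl_data() callback standing in for append_ipl_vmparm()/append_ipl_scpdata() and an assumed COMMAND_LINE_SIZE of 4096:

/*
 * Hedged sketch of the append_to_cmdline() contract; fake_ipl_data() and
 * the COMMAND_LINE_SIZE value are invented for the illustration.
 */
#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 4096

static char boot_command_line[COMMAND_LINE_SIZE] = "root=/dev/dasda1";

static size_t fake_ipl_data(char *dest, size_t size)
{
	return snprintf(dest, size, "dasd=0150-0155");
}

static void append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
	size_t len = strlen(boot_command_line);
	char *delim = boot_command_line + len;	  /* '\0' character position */
	char *parm = boot_command_line + len + 1; /* scratch area after '\0' */
	size_t rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);

	if (!rc)
		return;
	if (*parm == '=')	/* "=..." replaces the whole command line */
		memmove(boot_command_line, parm + 1, rc);
	else
		*delim = ' ';	/* otherwise join the two strings with a space */
}

int main(void)
{
	append_to_cmdline(fake_ipl_data);
	puts(boot_command_line);	/* "root=/dev/dasda1 dasd=0150-0155" */
	return 0;
}
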
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c4c80a22bc1f..f43d2ee54464 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -54,7 +54,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
- _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8)
+ _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
@@ -278,7 +278,8 @@ sysc_return:
bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_restore:
#ifdef CONFIG_TRACE_IRQFLAGS
- la %r1,BASED(sysc_restore_trace_psw)
+ la %r1,BASED(sysc_restore_trace_psw_addr)
+ l %r1,0(%r1)
lpsw 0(%r1)
sysc_restore_trace:
TRACE_IRQS_CHECK
@@ -289,10 +290,15 @@ sysc_leave:
sysc_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+sysc_restore_trace_psw_addr:
+ .long sysc_restore_trace_psw
+
+ .section .data,"aw",@progbits
.align 8
.globl sysc_restore_trace_psw
sysc_restore_trace_psw:
.long 0, sysc_restore_trace + 0x80000000
+ .previous
#endif
#
@@ -606,7 +612,8 @@ io_return:
bnz BASED(io_work) # there is work to do (signals etc.)
io_restore:
#ifdef CONFIG_TRACE_IRQFLAGS
- la %r1,BASED(io_restore_trace_psw)
+ la %r1,BASED(io_restore_trace_psw_addr)
+ l %r1,0(%r1)
lpsw 0(%r1)
io_restore_trace:
TRACE_IRQS_CHECK
@@ -617,10 +624,15 @@ io_leave:
io_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+io_restore_trace_psw_addr:
+ .long io_restore_trace_psw
+
+ .section .data,"aw",@progbits
.align 8
.globl io_restore_trace_psw
io_restore_trace_psw:
.long 0, io_restore_trace + 0x80000000
+ .previous
#endif
#
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index f6618e9e15ef..a6f7b20df616 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -57,7 +57,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
- _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8)
+ _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
#define BASED(name) name-system_call(%r13)
@@ -284,10 +284,12 @@ sysc_leave:
sysc_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+ .section .data,"aw",@progbits
.align 8
.globl sysc_restore_trace_psw
sysc_restore_trace_psw:
.quad 0, sysc_restore_trace
+ .previous
#endif
#
@@ -595,10 +597,12 @@ io_leave:
io_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+ .section .data,"aw",@progbits
.align 8
.globl io_restore_trace_psw
io_restore_trace_psw:
.quad 0, io_restore_trace
+ .previous
#endif
#
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 3e298e64f0db..57bdcb1e3cdf 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -220,6 +220,29 @@ struct syscall_metadata *syscall_nr_to_meta(int nr)
return syscalls_metadata[nr];
}
+int syscall_name_to_nr(char *name)
+{
+ int i;
+
+ if (!syscalls_metadata)
+ return -1;
+ for (i = 0; i < NR_syscalls; i++)
+ if (syscalls_metadata[i])
+ if (!strcmp(syscalls_metadata[i]->name, name))
+ return i;
+ return -1;
+}
+
+void set_syscall_enter_id(int num, int id)
+{
+ syscalls_metadata[num]->enter_id = id;
+}
+
+void set_syscall_exit_id(int num, int id)
+{
+ syscalls_metadata[num]->exit_id = id;
+}
+
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
struct syscall_metadata *start;
@@ -237,24 +260,19 @@ static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
return NULL;
}
-void arch_init_ftrace_syscalls(void)
+static int __init arch_init_ftrace_syscalls(void)
{
struct syscall_metadata *meta;
int i;
- static atomic_t refs;
-
- if (atomic_inc_return(&refs) != 1)
- goto out;
syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
GFP_KERNEL);
if (!syscalls_metadata)
- goto out;
+ return -ENOMEM;
for (i = 0; i < NR_syscalls; i++) {
meta = find_syscall_meta((unsigned long)sys_call_table[i]);
syscalls_metadata[i] = meta;
}
- return;
-out:
- atomic_dec(&refs);
+ return 0;
}
+arch_initcall(arch_init_ftrace_syscalls);
#endif
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index ec6882348520..c52b4f7742fa 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -27,6 +27,7 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
+#include <asm/cpu.h>
#ifdef CONFIG_64BIT
#define ARCH_OFFSET 4
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 2ced846065b7..602b508cd4c4 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -24,6 +24,7 @@ startup_continue:
# Setup stack
#
l %r15,.Linittu-.LPG1(%r13)
+ st %r15,__LC_THREAD_INFO # cache thread info in lowcore
mvc __LC_CURRENT(4),__TI_task(%r15)
ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
st %r15,__LC_KERNEL_STACK # set end of kernel stack
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 65667b2e65ce..6a250808092b 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -62,9 +62,9 @@ startup_continue:
clr %r11,%r12
je 5f # no more space in prefix array
4:
- ahi %r8,1 # next cpu (r8 += 1)
- cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ?
- jl 1b # jump if not last cpu
+ ahi %r8,1 # next cpu (r8 += 1)
+ chi %r8,MAX_CPU_ADDRESS # is last possible cpu ?
+ jle 1b # jump if not last cpu
5:
lhi %r1,2 # mode 2 = esame (dump)
j 6f
@@ -92,6 +92,7 @@ startup_continue:
# Setup stack
#
larl %r15,init_thread_union
+ stg %r15,__LC_THREAD_INFO # cache thread info in lowcore
lg %r14,__TI_task(%r15) # cache current in lowcore
stg %r14,__LC_CURRENT
aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
@@ -129,8 +130,6 @@ startup_continue:
#ifdef CONFIG_ZFCPDUMP
.Lcurrent_cpu:
.long 0x0
-.Llast_cpu:
- .long 0x0000ffff
.Lpref_arr_ptr:
.long zfcpdump_prefix_array
#endif /* CONFIG_ZFCPDUMP */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index b8bf4b140065..ee57a42e6e93 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -70,6 +70,7 @@ struct shutdown_action {
char *name;
void (*fn) (struct shutdown_trigger *trigger);
int (*init) (void);
+ int init_rc;
};
static char *ipl_type_str(enum ipl_type type)
@@ -271,17 +272,18 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
/* VM IPL PARM routines */
-static void reipl_get_ascii_vmparm(char *dest,
+size_t reipl_get_ascii_vmparm(char *dest, size_t size,
const struct ipl_parameter_block *ipb)
{
int i;
- int len = 0;
+ size_t len;
char has_lowercase = 0;
+ len = 0;
if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
(ipb->ipl_info.ccw.vm_parm_len > 0)) {
- len = ipb->ipl_info.ccw.vm_parm_len;
+ len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
/* If at least one character is lowercase, we assume mixed
* case; otherwise we convert everything to lowercase.
@@ -298,14 +300,20 @@ static void reipl_get_ascii_vmparm(char *dest,
EBCASC(dest, len);
}
dest[len] = 0;
+
+ return len;
}
-void get_ipl_vmparm(char *dest)
+size_t append_ipl_vmparm(char *dest, size_t size)
{
+ size_t rc;
+
+ rc = 0;
if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
- reipl_get_ascii_vmparm(dest, &ipl_block);
+ rc = reipl_get_ascii_vmparm(dest, size, &ipl_block);
else
dest[0] = 0;
+ return rc;
}
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
@@ -313,10 +321,65 @@ static ssize_t ipl_vm_parm_show(struct kobject *kobj,
{
char parm[DIAG308_VMPARM_SIZE + 1] = {};
- get_ipl_vmparm(parm);
+ append_ipl_vmparm(parm, sizeof(parm));
return sprintf(page, "%s\n", parm);
}
+static size_t scpdata_length(const char* buf, size_t count)
+{
+ while (count) {
+ if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
+ break;
+ count--;
+ }
+ return count;
+}
+
+size_t reipl_append_ascii_scpdata(char *dest, size_t size,
+ const struct ipl_parameter_block *ipb)
+{
+ size_t count;
+ size_t i;
+ int has_lowercase;
+
+ count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
+ ipb->ipl_info.fcp.scp_data_len));
+ if (!count)
+ goto out;
+
+ has_lowercase = 0;
+ for (i = 0; i < count; i++) {
+ if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
+ count = 0;
+ goto out;
+ }
+ if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
+ has_lowercase = 1;
+ }
+
+ if (has_lowercase)
+ memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
+ else
+ for (i = 0; i < count; i++)
+ dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
+out:
+ dest[count] = '\0';
+ return count;
+}
+
+size_t append_ipl_scpdata(char *dest, size_t len)
+{
+ size_t rc;
+
+ rc = 0;
+ if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP)
+ rc = reipl_append_ascii_scpdata(dest, len, &ipl_block);
+ else
+ dest[0] = 0;
+ return rc;
+}
+
+
static struct kobj_attribute sys_ipl_vm_parm_attr =
__ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
@@ -552,7 +615,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
{
char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
- reipl_get_ascii_vmparm(vmparm, ipb);
+ reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
return sprintf(page, "%s\n", vmparm);
}
@@ -625,6 +688,59 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
/* FCP reipl device attributes */
+static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len;
+ void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data;
+
+ return memory_read_from_buffer(buf, count, &off, scp_data, size);
+}
+
+static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t padding;
+ size_t scpdata_len;
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off >= DIAG308_SCPDATA_SIZE)
+ return -ENOSPC;
+
+ if (count > DIAG308_SCPDATA_SIZE - off)
+ count = DIAG308_SCPDATA_SIZE - off;
+
+ memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
+ scpdata_len = off + count;
+
+ if (scpdata_len % 8) {
+ padding = 8 - (scpdata_len % 8);
+ memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
+ 0, padding);
+ scpdata_len += padding;
+ }
+
+ reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len;
+ reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len;
+ reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len;
+
+ return count;
+}
+
+static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
+ .attr = {
+ .name = "scp_data",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .size = PAGE_SIZE,
+ .read = reipl_fcp_scpdata_read,
+ .write = reipl_fcp_scpdata_write,
+};
+
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
reipl_block_fcp->ipl_info.fcp.wwpn);
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
@@ -646,7 +762,6 @@ static struct attribute *reipl_fcp_attrs[] = {
};
static struct attribute_group reipl_fcp_attr_group = {
- .name = IPL_FCP_STR,
.attrs = reipl_fcp_attrs,
};
@@ -894,6 +1009,7 @@ static struct kobj_attribute reipl_type_attr =
__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
static struct kset *reipl_kset;
+static struct kset *reipl_fcp_kset;
static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
const enum ipl_method m)
@@ -905,7 +1021,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
reipl_get_ascii_loadparm(loadparm, ipb);
reipl_get_ascii_nss_name(nss_name, ipb);
- reipl_get_ascii_vmparm(vmparm, ipb);
+ reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
switch (m) {
case REIPL_METHOD_CCW_VM:
@@ -1075,23 +1191,44 @@ static int __init reipl_fcp_init(void)
int rc;
if (!diag308_set_works) {
- if (ipl_info.type == IPL_TYPE_FCP)
+ if (ipl_info.type == IPL_TYPE_FCP) {
make_attrs_ro(reipl_fcp_attrs);
- else
+ sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO;
+ } else
return 0;
}
reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_fcp)
return -ENOMEM;
- rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group);
+
+ /* sysfs: create fcp kset for mixing attr group and bin attrs */
+ reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL,
+ &reipl_kset->kobj);
+ if (!reipl_fcp_kset) {
+ free_page((unsigned long) reipl_block_fcp);
+ return -ENOMEM;
+ }
+
+ rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
+ if (rc) {
+ kset_unregister(reipl_fcp_kset);
+ free_page((unsigned long) reipl_block_fcp);
+ return rc;
+ }
+
+ rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj,
+ &sys_reipl_fcp_scp_data_attr);
if (rc) {
- free_page((unsigned long)reipl_block_fcp);
+ sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
+ kset_unregister(reipl_fcp_kset);
+ free_page((unsigned long) reipl_block_fcp);
return rc;
}
- if (ipl_info.type == IPL_TYPE_FCP) {
+
+ if (ipl_info.type == IPL_TYPE_FCP)
memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
- } else {
+ else {
reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
@@ -1486,11 +1623,13 @@ static int set_trigger(const char *buf, struct shutdown_trigger *trigger,
int i;
for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
- if (!shutdown_actions_list[i])
- continue;
if (sysfs_streq(buf, shutdown_actions_list[i]->name)) {
- trigger->action = shutdown_actions_list[i];
- return len;
+ if (shutdown_actions_list[i]->init_rc) {
+ return shutdown_actions_list[i]->init_rc;
+ } else {
+ trigger->action = shutdown_actions_list[i];
+ return len;
+ }
}
}
return -EINVAL;
@@ -1640,8 +1779,8 @@ static void __init shutdown_actions_init(void)
for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
if (!shutdown_actions_list[i]->init)
continue;
- if (shutdown_actions_list[i]->init())
- shutdown_actions_list[i] = NULL;
+ shutdown_actions_list[i]->init_rc =
+ shutdown_actions_list[i]->init();
}
}
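
The new reipl_fcp_scpdata_write() handler pads the stored SCP data up to a multiple of 8 bytes before updating the parameter block lengths. A small standalone sketch of just that padding step (buffer handling simplified; not the sysfs handler itself):

/*
 * Hedged sketch: round the SCP data length up to a multiple of 8 and
 * zero-fill the padding, as reipl_fcp_scpdata_write() does above.
 */
#include <stdio.h>
#include <stddef.h>
#include <string.h>

static size_t pad_scpdata(char *scp_data, size_t scpdata_len)
{
	if (scpdata_len % 8) {
		size_t padding = 8 - (scpdata_len % 8);

		memset(scp_data + scpdata_len, 0, padding);
		scpdata_len += padding;
	}
	return scpdata_len;
}

int main(void)
{
	char buf[16] = "HALT";			/* 4 data bytes */

	printf("%zu\n", pad_scpdata(buf, 4));	/* prints 8 */
	return 0;
}
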
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 2a0a5e97ba8c..dfe015d7398c 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -11,111 +11,27 @@
ftrace_stub:
br %r14
-#ifdef CONFIG_64BIT
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
.globl _mcount
_mcount:
- br %r14
-
- .globl ftrace_caller
-ftrace_caller:
- larl %r1,function_trace_stop
- icm %r1,0xf,0(%r1)
- bnzr %r14
- stmg %r2,%r5,32(%r15)
- stg %r14,112(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- lgr %r2,%r14
- lg %r3,168(%r15)
- larl %r14,ftrace_dyn_func
- lg %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl ftrace_graph_caller
-ftrace_graph_caller:
- # This unconditional branch gets runtime patched. Change only if
- # you know what you are doing. See ftrace_enable_graph_caller().
- j 0f
- lg %r2,272(%r15)
- lg %r3,168(%r15)
- brasl %r14,prepare_ftrace_return
- stg %r2,168(%r15)
-0:
-#endif
- aghi %r15,160
- lmg %r2,%r5,32(%r15)
- lg %r14,112(%r15)
+#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
.data
.globl ftrace_dyn_func
ftrace_dyn_func:
- .quad ftrace_stub
+ .long ftrace_stub
.previous
-#else /* CONFIG_DYNAMIC_FTRACE */
-
- .globl _mcount
-_mcount:
- larl %r1,function_trace_stop
- icm %r1,0xf,0(%r1)
- bnzr %r14
- stmg %r2,%r5,32(%r15)
- stg %r14,112(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- lgr %r2,%r14
- lg %r3,168(%r15)
- larl %r14,ftrace_trace_function
- lg %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- lg %r2,272(%r15)
- lg %r3,168(%r15)
- brasl %r14,prepare_ftrace_return
- stg %r2,168(%r15)
-#endif
- aghi %r15,160
- lmg %r2,%r5,32(%r15)
- lg %r14,112(%r15)
- br %r14
-
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
- .globl return_to_handler
-return_to_handler:
- stmg %r2,%r5,32(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- brasl %r14,ftrace_return_to_handler
- aghi %r15,160
- lgr %r14,%r2
- lmg %r2,%r5,32(%r15)
- br %r14
-
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#else /* CONFIG_64BIT */
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
- .globl _mcount
-_mcount:
- br %r14
-
.globl ftrace_caller
ftrace_caller:
+#endif
stm %r2,%r5,16(%r15)
bras %r1,2f
+#ifdef CONFIG_DYNAMIC_FTRACE
+0: .long ftrace_dyn_func
+#else
0: .long ftrace_trace_function
+#endif
1: .long function_trace_stop
2: l %r2,1b-0b(%r1)
icm %r2,0xf,0(%r2)
@@ -131,53 +47,13 @@ ftrace_caller:
l %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
.globl ftrace_graph_caller
ftrace_graph_caller:
# This unconditional branch gets runtime patched. Change only if
# you know what you are doing. See ftrace_enable_graph_caller().
j 1f
- bras %r1,0f
- .long prepare_ftrace_return
-0: l %r2,152(%r15)
- l %r4,0(%r1)
- l %r3,100(%r15)
- basr %r14,%r4
- st %r2,100(%r15)
-1:
#endif
- ahi %r15,96
- l %r14,56(%r15)
-3: lm %r2,%r5,16(%r15)
- br %r14
-
- .data
- .globl ftrace_dyn_func
-ftrace_dyn_func:
- .long ftrace_stub
- .previous
-
-#else /* CONFIG_DYNAMIC_FTRACE */
-
- .globl _mcount
-_mcount:
- stm %r2,%r5,16(%r15)
- bras %r1,2f
-0: .long ftrace_trace_function
-1: .long function_trace_stop
-2: l %r2,1b-0b(%r1)
- icm %r2,0xf,0(%r2)
- jnz 3f
- st %r14,56(%r15)
- lr %r0,%r15
- ahi %r15,-96
- l %r3,100(%r15)
- la %r2,0(%r14)
- st %r0,__SF_BACKCHAIN(%r15)
- la %r3,0(%r3)
- l %r14,0b-0b(%r1)
- l %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
bras %r1,0f
.long prepare_ftrace_return
0: l %r2,152(%r15)
@@ -185,14 +61,13 @@ _mcount:
l %r3,100(%r15)
basr %r14,%r4
st %r2,100(%r15)
+1:
#endif
ahi %r15,96
l %r14,56(%r15)
3: lm %r2,%r5,16(%r15)
br %r14
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl return_to_handler
@@ -211,6 +86,4 @@ return_to_handler:
lm %r2,%r5,16(%r15)
br %r14
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#endif /* CONFIG_64BIT */
+#endif
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
new file mode 100644
index 000000000000..c37211c6092b
--- /dev/null
+++ b/arch/s390/kernel/mcount64.S
@@ -0,0 +1,78 @@
+/*
+ * Copyright IBM Corp. 2008,2009
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <asm/asm-offsets.h>
+
+ .globl ftrace_stub
+ftrace_stub:
+ br %r14
+
+ .globl _mcount
+_mcount:
+#ifdef CONFIG_DYNAMIC_FTRACE
+ br %r14
+
+ .data
+ .globl ftrace_dyn_func
+ftrace_dyn_func:
+ .quad ftrace_stub
+ .previous
+
+ .globl ftrace_caller
+ftrace_caller:
+#endif
+ larl %r1,function_trace_stop
+ icm %r1,0xf,0(%r1)
+ bnzr %r14
+ stmg %r2,%r5,32(%r15)
+ stg %r14,112(%r15)
+ lgr %r1,%r15
+ aghi %r15,-160
+ stg %r1,__SF_BACKCHAIN(%r15)
+ lgr %r2,%r14
+ lg %r3,168(%r15)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ larl %r14,ftrace_dyn_func
+#else
+ larl %r14,ftrace_trace_function
+#endif
+ lg %r14,0(%r14)
+ basr %r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+ .globl ftrace_graph_caller
+ftrace_graph_caller:
+ # This unconditional branch gets runtime patched. Change only if
+ # you know what you are doing. See ftrace_enable_graph_caller().
+ j 0f
+#endif
+ lg %r2,272(%r15)
+ lg %r3,168(%r15)
+ brasl %r14,prepare_ftrace_return
+ stg %r2,168(%r15)
+0:
+#endif
+ aghi %r15,160
+ lmg %r2,%r5,32(%r15)
+ lg %r14,112(%r15)
+ br %r14
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+ .globl return_to_handler
+return_to_handler:
+ stmg %r2,%r5,32(%r15)
+ lgr %r1,%r15
+ aghi %r15,-160
+ stg %r1,__SF_BACKCHAIN(%r15)
+ brasl %r14,ftrace_return_to_handler
+ aghi %r15,160
+ lgr %r14,%r2
+ lmg %r2,%r5,32(%r15)
+ br %r14
+
+#endif
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 490b39934d65..f3ddd7ac06c5 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -26,7 +26,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
@@ -52,6 +51,9 @@
#include "compat_ptrace.h"
#endif
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
enum s390_regset {
REGSET_GENERAL,
REGSET_FP,
@@ -662,8 +664,8 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
ret = -1;
}
- if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
- ftrace_syscall_enter(regs);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->gprs[2]);
if (unlikely(current->audit_context))
audit_syscall_entry(is_compat_task() ?
@@ -680,8 +682,8 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
regs->gprs[2]);
- if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
- ftrace_syscall_exit(regs);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->gprs[2]);
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 9717717c6fea..9ed13a1ed376 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -154,6 +154,16 @@ static int __init condev_setup(char *str)
__setup("condev=", condev_setup);
+static void __init set_preferred_console(void)
+{
+ if (MACHINE_IS_KVM)
+ add_preferred_console("hvc", 0, NULL);
+ else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
+ add_preferred_console("ttyS", 0, NULL);
+ else if (CONSOLE_IS_3270)
+ add_preferred_console("tty3270", 0, NULL);
+}
+
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
@@ -168,6 +178,7 @@ static int __init conmode_setup(char *str)
if (strncmp(str, "3270", 5) == 0)
SET_CONSOLE_3270;
#endif
+ set_preferred_console();
return 1;
}
@@ -780,9 +791,6 @@ static void __init setup_hwcaps(void)
void __init
setup_arch(char **cmdline_p)
{
- /* set up preferred console */
- add_preferred_console("ttyS", 0, NULL);
-
/*
* print what head.S has found out about the machine
*/
@@ -802,11 +810,9 @@ setup_arch(char **cmdline_p)
if (MACHINE_IS_VM)
pr_info("Linux is running as a z/VM "
"guest operating system in 64-bit mode\n");
- else if (MACHINE_IS_KVM) {
+ else if (MACHINE_IS_KVM)
pr_info("Linux is running under KVM in 64-bit mode\n");
- add_preferred_console("hvc", 0, NULL);
- s390_virtio_console_init();
- } else
+ else
pr_info("Linux is running natively in 64-bit mode\n");
#endif /* CONFIG_64BIT */
@@ -851,6 +857,7 @@ setup_arch(char **cmdline_p)
/* Setup default console */
conmode_default();
+ set_preferred_console();
/* Setup zfcpdump support */
setup_zfcpdump(console_devno);
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 062bd64e65fa..6b4fef877f9d 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -536,4 +536,6 @@ void do_notify_resume(struct pt_regs *regs)
{
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2270730f5354..56c16876b919 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -49,6 +49,7 @@
#include <asm/sclp.h>
#include <asm/cputime.h>
#include <asm/vdso.h>
+#include <asm/cpu.h>
#include "entry.h"
static struct task_struct *current_set[NR_CPUS];
@@ -70,6 +71,23 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
static void smp_ext_bitcall(int, ec_bit_sig);
+static int cpu_stopped(int cpu)
+{
+ __u32 status;
+
+ switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
+ case sigp_order_code_accepted:
+ case sigp_status_stored:
+ /* Check for stopped and check stop state */
+ if (status & 0x50)
+ return 1;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
void smp_send_stop(void)
{
int cpu, rc;
@@ -86,7 +104,7 @@ void smp_send_stop(void)
rc = signal_processor(cpu, sigp_stop);
} while (rc == sigp_busy);
- while (!smp_cpu_not_running(cpu))
+ while (!cpu_stopped(cpu))
cpu_relax();
}
}
@@ -269,19 +287,6 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
#endif /* CONFIG_ZFCPDUMP */
-static int cpu_stopped(int cpu)
-{
- __u32 status;
-
- /* Check for stopped state */
- if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
- sigp_status_stored) {
- if (status & 0x40)
- return 1;
- }
- return 0;
-}
-
static int cpu_known(int cpu_id)
{
int cpu;
@@ -300,7 +305,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
logical_cpu = cpumask_first(&avail);
if (logical_cpu >= nr_cpu_ids)
return 0;
- for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
+ for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
if (cpu_known(cpu_id))
continue;
__cpu_logical_map[logical_cpu] = cpu_id;
@@ -379,7 +384,7 @@ static void __init smp_detect_cpus(void)
/* Use sigp detection algorithm if sclp doesn't work. */
if (sclp_get_cpu_info(info)) {
smp_use_sigp_detection = 1;
- for (cpu = 0; cpu <= 65535; cpu++) {
+ for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
if (cpu == boot_cpu_addr)
continue;
__cpu_logical_map[CPU_INIT_NO] = cpu;
@@ -635,7 +640,7 @@ int __cpu_disable(void)
void __cpu_die(unsigned int cpu)
{
/* Wait until target cpu is down */
- while (!smp_cpu_not_running(cpu))
+ while (!cpu_stopped(cpu))
cpu_relax();
smp_free_lowcore(cpu);
pr_info("Processor %d stopped\n", cpu);
@@ -687,13 +692,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
lowcore->extended_save_area_addr = (u32) save_area;
-#else
- if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
- BUG();
#endif
set_prefix((u32)(unsigned long) lowcore);
local_mcck_enable();
local_irq_enable();
+#ifdef CONFIG_64BIT
+ if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
+ BUG();
+#endif
for_each_possible_cpu(cpu)
if (cpu != smp_processor_id())
smp_create_idle(cpu);
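
The relocated cpu_stopped() above now also accepts sigp_order_code_accepted and tests the check-stop bit, using the mask 0x50 instead of the old 0x40. A tiny sketch of that status test, with hypothetical constant names derived from those masks:

/*
 * Hedged sketch of the status test; the constant names are hypothetical,
 * the bit values are taken from the 0x40/0x50 masks in the hunks above.
 */
#include <assert.h>

#define SIGP_STATUS_CHECK_STOP	0x10
#define SIGP_STATUS_STOPPED	0x40

static int status_means_stopped(unsigned int status)
{
	/* 0x50: stopped or check-stopped, either counts as "not running" */
	return (status & (SIGP_STATUS_STOPPED | SIGP_STATUS_CHECK_STOP)) != 0;
}

int main(void)
{
	assert(status_means_stopped(0x40));	/* stopped */
	assert(status_means_stopped(0x10));	/* check stop */
	assert(!status_means_stopped(0x00));	/* running */
	return 0;
}
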
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
new file mode 100644
index 000000000000..086bee970cae
--- /dev/null
+++ b/arch/s390/kernel/suspend.c
@@ -0,0 +1,73 @@
+/*
+ * Suspend support specific for s390.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ */
+
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+#include <linux/pfn.h>
+#include <linux/mm.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/ipl.h>
+
+/*
+ * References to section boundaries
+ */
+extern const void __nosave_begin, __nosave_end;
+
+/*
+ * check if given pfn is in the 'nosave' or in the read only NSS section
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
+ >> PAGE_SHIFT;
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+
+ if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ return 1;
+ if (pfn >= stext_pfn && pfn <= eshared_pfn) {
+ if (ipl_info.type == IPL_TYPE_NSS)
+ return 1;
+ } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
+ return 1;
+ return 0;
+}
+
+void save_processor_state(void)
+{
+ /* swsusp_arch_suspend() actually saves all cpu register contents.
+ * Machine checks must be disabled since swsusp_arch_suspend() stores
+ * register contents to their lowcore save areas. That's the same
+ * place where register contents on machine checks would be saved.
+ * To avoid register corruption disable machine checks.
+ * We must also disable machine checks in the new psw mask for
+ * program checks, since swsusp_arch_suspend() may generate program
+ * checks. Disabling machine checks for all other new psw masks is
+ * just paranoia.
+ */
+ local_mcck_disable();
+ /* Disable lowcore protection */
+ __ctl_clear_bit(0,28);
+ S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
+}
+
+void restore_processor_state(void)
+{
+ S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
+ /* Enable lowcore protection */
+ __ctl_set_bit(0,28);
+ local_mcck_enable();
+}
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
new file mode 100644
index 000000000000..7cd6b096f0d1
--- /dev/null
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -0,0 +1,184 @@
+/*
+ * S390 64-bit swsusp implementation
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ * Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * Save register context in absolute 0 lowcore and call swsusp_save() to
+ * create in-memory kernel image. The context is saved in the designated
+ * "store status" memory locations (see POP).
+ * We return from this function twice. The first time during the suspend to
+ * disk process. The second time via the swsusp_arch_resume() function
+ * (see below) in the resume process.
+ * This function runs with disabled interrupts.
+ */
+ .section .text
+ .align 4
+ .globl swsusp_arch_suspend
+swsusp_arch_suspend:
+ stmg %r6,%r15,__SF_GPRS(%r15)
+ lgr %r1,%r15
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ stg %r1,__SF_BACKCHAIN(%r15)
+
+ /* Deactivate DAT */
+ stnsm __SF_EMPTY(%r15),0xfb
+
+ /* Store prefix register on stack */
+ stpx __SF_EMPTY(%r15)
+
+ /* Save prefix register contents for lowcore */
+ llgf %r4,__SF_EMPTY(%r15)
+
+ /* Get pointer to save area */
+ lghi %r1,0x1000
+
+ /* Store registers */
+ mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
+ stfpc 0x31c(%r1) /* store fpu control */
+ std 0,0x200(%r1) /* store f0 */
+ std 1,0x208(%r1) /* store f1 */
+ std 2,0x210(%r1) /* store f2 */
+ std 3,0x218(%r1) /* store f3 */
+ std 4,0x220(%r1) /* store f4 */
+ std 5,0x228(%r1) /* store f5 */
+ std 6,0x230(%r1) /* store f6 */
+ std 7,0x238(%r1) /* store f7 */
+ std 8,0x240(%r1) /* store f8 */
+ std 9,0x248(%r1) /* store f9 */
+ std 10,0x250(%r1) /* store f10 */
+ std 11,0x258(%r1) /* store f11 */
+ std 12,0x260(%r1) /* store f12 */
+ std 13,0x268(%r1) /* store f13 */
+ std 14,0x270(%r1) /* store f14 */
+ std 15,0x278(%r1) /* store f15 */
+ stam %a0,%a15,0x340(%r1) /* store access registers */
+ stctg %c0,%c15,0x380(%r1) /* store control registers */
+ stmg %r0,%r15,0x280(%r1) /* store general registers */
+
+ stpt 0x328(%r1) /* store timer */
+ stckc 0x330(%r1) /* store clock comparator */
+
+ /* Activate DAT */
+ stosm __SF_EMPTY(%r15),0x04
+
+ /* Set prefix page to zero */
+ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
+ spx __SF_EMPTY(%r15)
+
+ lghi %r2,0
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
+
+ /* Save image */
+ brasl %r14,swsusp_save
+
+ /* Restore prefix register and return */
+ lghi %r1,0x1000
+ spx 0x318(%r1)
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+ br %r14
+
+/*
+ * Restore saved memory image to correct place and restore register context.
+ * Then we return to the function that called swsusp_arch_suspend().
+ * swsusp_arch_resume() runs with disabled interrupts.
+ */
+ .globl swsusp_arch_resume
+swsusp_arch_resume:
+ stmg %r6,%r15,__SF_GPRS(%r15)
+ lgr %r1,%r15
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ stg %r1,__SF_BACKCHAIN(%r15)
+
+#ifdef CONFIG_SMP
+ /* Save boot cpu number */
+ brasl %r14,smp_get_phys_cpu_id
+ lgr %r10,%r2
+#endif
+ /* Deactivate DAT */
+ stnsm __SF_EMPTY(%r15),0xfb
+
+ /* Set prefix page to zero */
+ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
+ spx __SF_EMPTY(%r15)
+
+ /* Restore saved image */
+ larl %r1,restore_pblist
+ lg %r1,0(%r1)
+ ltgr %r1,%r1
+ jz 2f
+0:
+ lg %r2,8(%r1)
+ lg %r4,0(%r1)
+ lghi %r3,PAGE_SIZE
+ lghi %r5,PAGE_SIZE
+1:
+ mvcle %r2,%r4,0
+ jo 1b
+ lg %r1,16(%r1)
+ ltgr %r1,%r1
+ jnz 0b
+2:
+ ptlb /* flush tlb */
+
+ /* Restore registers */
+ lghi %r13,0x1000 /* %r13 = pointer to save area */
+
+ spt 0x328(%r13) /* reprogram timer */
+ //sckc 0x330(%r13) /* set clock comparator */
+
+ lctlg %c0,%c15,0x380(%r13) /* load control registers */
+ lam %a0,%a15,0x340(%r13) /* load access registers */
+
+ lfpc 0x31c(%r13) /* load fpu control */
+ ld 0,0x200(%r13) /* load f0 */
+ ld 1,0x208(%r13) /* load f1 */
+ ld 2,0x210(%r13) /* load f2 */
+ ld 3,0x218(%r13) /* load f3 */
+ ld 4,0x220(%r13) /* load f4 */
+ ld 5,0x228(%r13) /* load f5 */
+ ld 6,0x230(%r13) /* load f6 */
+ ld 7,0x238(%r13) /* load f7 */
+ ld 8,0x240(%r13) /* load f8 */
+ ld 9,0x248(%r13) /* load f9 */
+ ld 10,0x250(%r13) /* load f10 */
+ ld 11,0x258(%r13) /* load f11 */
+ ld 12,0x260(%r13) /* load f12 */
+ ld 13,0x268(%r13) /* load f13 */
+ ld 14,0x270(%r13) /* load f14 */
+ ld 15,0x278(%r13) /* load f15 */
+
+ /* Load old stack */
+ lg %r15,0x2f8(%r13)
+
+ /* Pointer to save area */
+ lghi %r13,0x1000
+
+#ifdef CONFIG_SMP
+ /* Switch CPUs */
+ lgr %r2,%r10 /* get cpu id */
+ llgf %r3,0x318(%r13)
+ brasl %r14,smp_switch_boot_cpu_in_resume
+#endif
+ /* Restore prefix register */
+ spx 0x318(%r13)
+
+ /* Activate DAT */
+ stosm __SF_EMPTY(%r15),0x04
+
+ /* Return 0 */
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+ br %r14
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d4c8e9c47c81..34162a0b2caa 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -60,6 +60,7 @@
#define TICK_SIZE tick
u64 sched_clock_base_cc = -1; /* Force to data section. */
+EXPORT_SYMBOL_GPL(sched_clock_base_cc);
static DEFINE_PER_CPU(struct clock_event_device, comparators);
@@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
*/
unsigned long long notrace sched_clock(void)
{
- return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
+ return (get_clock_monotonic() * 125) >> 9;
}
/*
@@ -90,6 +91,7 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
todval -= (sec * 1000000) << 12;
xtime->tv_nsec = ((todval * 1000) >> 12);
}
+EXPORT_SYMBOL(tod_to_timeval);
void clock_comparator_work(void)
{
@@ -182,12 +184,14 @@ static void timing_alert_interrupt(__u16 code)
static void etr_reset(void);
static void stp_reset(void);
-unsigned long read_persistent_clock(void)
+void read_persistent_clock(struct timespec *ts)
{
- struct timespec ts;
+ tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts);
+}
- tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, &ts);
- return ts.tv_sec;
+void read_boot_clock(struct timespec *ts)
+{
+ tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts);
}
static cycle_t read_tod_clock(struct clocksource *cs)
@@ -205,6 +209,10 @@ static struct clocksource clocksource_tod = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+struct clocksource * __init clocksource_default_clock(void)
+{
+ return &clocksource_tod;
+}
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
@@ -242,10 +250,6 @@ void update_vsyscall_tz(void)
*/
void __init time_init(void)
{
- struct timespec ts;
- unsigned long flags;
- cycle_t now;
-
/* Reset time synchronization interfaces. */
etr_reset();
stp_reset();
@@ -261,26 +265,6 @@ void __init time_init(void)
if (clocksource_register(&clocksource_tod) != 0)
panic("Could not register TOD clock source");
- /*
- * The TOD clock is an accurate clock. The xtime should be
- * initialized in a way that the difference between TOD and
- * xtime is reasonably small. Too bad that timekeeping_init
- * sets xtime.tv_nsec to zero. In addition the clock source
- * change from the jiffies clock source to the TOD clock
- * source add another error of up to 1/HZ second. The same
- * function sets wall_to_monotonic to a value that is too
- * small for /proc/uptime to be accurate.
- * Reset xtime and wall_to_monotonic to sane values.
- */
- write_seqlock_irqsave(&xtime_lock, flags);
- now = get_clock();
- tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
- clocksource_tod.cycle_last = now;
- clocksource_tod.raw_time = xtime;
- tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
- set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
- write_sequnlock_irqrestore(&xtime_lock, flags);
-
/* Enable TOD clock interrupts on the boot cpu. */
init_cpu_timer();
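
sched_clock() now scales a monotonic TOD value straight to nanoseconds; (tod * 125) >> 9 is simply tod * 1000 / 4096 written without the division, since the TOD clock ticks 4096 times per microsecond. A trivial sketch of the conversion:

/* Hedged sketch: TOD clock units (4096 per microsecond) to nanoseconds. */
#include <stdio.h>
#include <stdint.h>

static uint64_t tod_to_ns(uint64_t tod)
{
	return (tod * 125) >> 9;	/* 125/512 == 1000/4096 */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)tod_to_ns(4096)); /* 1 us -> 1000 ns */
	return 0;
}
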
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 79dbfee831ec..49106c6e6f88 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -88,10 +88,17 @@ __kernel_clock_gettime:
llilh %r4,0x0100
sar %a4,%r4
lghi %r4,0
+ epsw %r5,0
sacf 512 /* Magic ectg instruction */
.insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
- sacf 0
- sar %a4,%r2
+ tml %r5,0x4000
+ jo 11f
+ tml %r5,0x8000
+ jno 10f
+ sacf 256
+ j 11f
+10: sacf 0
+11: sar %a4,%r2
algr %r1,%r0 /* r1 = cputime as TOD value */
mghi %r1,1000 /* convert to nanoseconds */
srlg %r1,%r1,12 /* r1 = cputime in nanosec */
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a53db23ee092..bc15ef93e656 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -52,55 +52,18 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_eshared = .; /* End of shareable data */
- . = ALIGN(16); /* Exception table */
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- } :data
-
- .data : { /* Data */
- DATA_DATA
- CONSTRUCTORS
- }
-
- . = ALIGN(PAGE_SIZE);
- .data_nosave : {
- __nosave_begin = .;
- *(.data.nosave)
- }
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
-
- . = ALIGN(PAGE_SIZE);
- .data.page_aligned : {
- *(.data.idt)
- }
+ EXCEPTION_TABLE(16) :data
- . = ALIGN(0x100);
- .data.cacheline_aligned : {
- *(.data.cacheline_aligned)
- }
+ RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
- . = ALIGN(0x100);
- .data.read_mostly : {
- *(.data.read_mostly)
- }
_edata = .; /* End of data section */
- . = ALIGN(THREAD_SIZE); /* init_task */
- .data.init_task : {
- *(.data.init_task)
- }
-
/* will be freed after init */
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
+
+ INIT_TEXT_SECTION(PAGE_SIZE)
+
/*
* .exit.text is discarded at runtime, not link time,
* to deal with references from __bug_table
@@ -111,59 +74,20 @@ SECTIONS
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
- .init.data : {
- INIT_DATA
- }
- . = ALIGN(0x100);
- .init.setup : {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
- .initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
-
- .con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- }
- SECURITY_INIT
-
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(0x100);
- .init.ramfs : {
- __initramfs_start = .;
- *(.init.ramfs)
- . = ALIGN(2);
- __initramfs_end = .;
- }
-#endif
+ INIT_DATA_SECTION(0x100)
PERCPU(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */
- /* BSS */
- .bss : {
- __bss_start = .;
- *(.bss)
- . = ALIGN(2);
- __bss_stop = .;
- }
+ BSS_SECTION(0, 2, 0)
_end = . ;
- /* Sections to be discarded */
- /DISCARD/ : {
- EXIT_DATA
- *(.exitcall.exit)
- }
-
/* Debugging sections. */
STABS_DEBUG
DWARF_DEBUG
+
+ /* Sections to be discarded */
+ DISCARDS
}