-rw-r--r--   arch/ia64/kernel/ivt.S          |  1
-rw-r--r--   arch/ia64/kernel/mca.c          | 98
-rw-r--r--   arch/ia64/kernel/mca_drv.c      | 22
-rw-r--r--   arch/ia64/kernel/mca_drv.h      |  7
-rw-r--r--   arch/ia64/kernel/mca_drv_asm.S  | 13
-rw-r--r--   arch/ia64/kernel/vmlinux.lds.S  |  9
-rw-r--r--   include/asm-ia64/asmmacro.h     | 11
7 files changed, 120 insertions, 41 deletions
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index dcd906fe5749..829a43cab797 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -865,6 +865,7 @@ ENTRY(interrupt)
;;
SAVE_REST
;;
+ MCA_RECOVER_RANGE(interrupt)
alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
mov out0=cr.ivr // pass cr.ivr as first arg
add out1=16,sp // pass pointer to pt_regs as second arg
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index cedcae713e9f..87ff7fe33cfb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -83,6 +83,7 @@
#include <asm/irq.h>
#include <asm/hw_irq.h>
+#include "mca_drv.h"
#include "entry.h"
#if defined(IA64_MCA_DEBUG_INFO)
@@ -281,6 +282,50 @@ ia64_mca_log_sal_error_record(int sal_info_type)
ia64_sal_clear_state_info(sal_info_type);
}
+/*
+ * search_mca_table
+ * See if the MCA surfaced in an instruction range
+ * that has been tagged as recoverable.
+ *
+ * Inputs
+ * first First address range to check
+ * last Last address range to check
+ * ip Instruction pointer, address we are looking for
+ *
+ * Return value:
+ * 1 on Success (in the table)/ 0 on Failure (not in the table)
+ */
+int
+search_mca_table (const struct mca_table_entry *first,
+ const struct mca_table_entry *last,
+ unsigned long ip)
+{
+ const struct mca_table_entry *curr;
+ u64 curr_start, curr_end;
+
+ curr = first;
+ while (curr <= last) {
+ curr_start = (u64) &curr->start_addr + curr->start_addr;
+ curr_end = (u64) &curr->end_addr + curr->end_addr;
+
+ if ((ip >= curr_start) && (ip <= curr_end)) {
+ return 1;
+ }
+ curr++;
+ }
+ return 0;
+}
+
+/* Given an address, look for it in the mca tables. */
+int mca_recover_range(unsigned long addr)
+{
+ extern struct mca_table_entry __start___mca_table[];
+ extern struct mca_table_entry __stop___mca_table[];
+
+ return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
+}
+EXPORT_SYMBOL_GPL(mca_recover_range);
+
#ifdef CONFIG_ACPI
int cpe_vector = -1;
@@ -747,31 +792,34 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
ia64_mca_modify_comm(previous_current);
goto no_mod;
}
- if (r13 != sos->prev_IA64_KR_CURRENT) {
- msg = "inconsistent previous current and r13";
- goto no_mod;
- }
- if ((r12 - r13) >= KERNEL_STACK_SIZE) {
- msg = "inconsistent r12 and r13";
- goto no_mod;
- }
- if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
- msg = "inconsistent ar.bspstore and r13";
- goto no_mod;
- }
- va.p = old_bspstore;
- if (va.f.reg < 5) {
- msg = "old_bspstore is in the wrong region";
- goto no_mod;
- }
- if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
- msg = "inconsistent ar.bsp and r13";
- goto no_mod;
- }
- size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
- if (ar_bspstore + size > r12) {
- msg = "no room for blocked state";
- goto no_mod;
+
+ if (!mca_recover_range(ms->pmsa_iip)) {
+ if (r13 != sos->prev_IA64_KR_CURRENT) {
+ msg = "inconsistent previous current and r13";
+ goto no_mod;
+ }
+ if ((r12 - r13) >= KERNEL_STACK_SIZE) {
+ msg = "inconsistent r12 and r13";
+ goto no_mod;
+ }
+ if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
+ msg = "inconsistent ar.bspstore and r13";
+ goto no_mod;
+ }
+ va.p = old_bspstore;
+ if (va.f.reg < 5) {
+ msg = "old_bspstore is in the wrong region";
+ goto no_mod;
+ }
+ if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
+ msg = "inconsistent ar.bsp and r13";
+ goto no_mod;
+ }
+ size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
+ if (ar_bspstore + size > r12) {
+ msg = "no room for blocked state";
+ goto no_mod;
+ }
}
ia64_mca_modify_comm(previous_current);
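
A minimal user-space sketch of the location-relative encoding that search_mca_table() above decodes; the standalone program, variable names, and addresses are illustrative only, not part of the patch. Each entry stores "target address minus the address of the field itself", so the table needs no relocation wherever the kernel image is mapped, and a 32-bit field is wide enough even on a 64-bit kernel:

#include <stdio.h>
#include <stdint.h>

struct demo_entry {
        int32_t start_addr;     /* target start minus &start_addr */
        int32_t end_addr;       /* target end   minus &end_addr   */
};

static uint64_t entry_start(const struct demo_entry *e)
{
        /* same arithmetic as search_mca_table():
         * (u64) &curr->start_addr + curr->start_addr */
        return (uint64_t)&e->start_addr + e->start_addr;
}

static uint64_t entry_end(const struct demo_entry *e)
{
        return (uint64_t)&e->end_addr + e->end_addr;
}

int main(void)
{
        char code[64];          /* stands in for a tagged instruction range */
        struct demo_entry e;
        uint64_t ip;

        /* emit the entry the way "y-., 99f-." does: store self-relative offsets */
        e.start_addr = (int32_t)((uint64_t)&code[0]  - (uint64_t)&e.start_addr);
        e.end_addr   = (int32_t)((uint64_t)&code[63] - (uint64_t)&e.end_addr);

        ip = (uint64_t)&code[10];
        printf("in range: %d\n", ip >= entry_start(&e) && ip <= entry_end(&e));
        return 0;
}

The decode helpers repeat the exact step used above: add the stored offset back to the address of the field that holds it.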
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index e883d85906db..37c88eb55873 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -6,6 +6,7 @@
* Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
* Copyright (C) 2005 Silicon Graphics, Inc
* Copyright (C) 2005 Keith Owens <kaos@sgi.com>
+ * Copyright (C) 2006 Russ Anderson <rja@sgi.com>
*/
#include <linux/config.h>
#include <linux/types.h>
@@ -121,11 +122,12 @@ mca_page_isolate(unsigned long paddr)
*/
void
-mca_handler_bh(unsigned long paddr)
+mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
{
- printk(KERN_ERR
- "OS_MCA: process [pid: %d](%s) encounters MCA (paddr=%lx)\n",
- current->pid, current->comm, paddr);
+ printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, "
+ "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n",
+ raw_smp_processor_id(), current->pid, current->uid,
+ iip, ipsr, paddr, current->comm);
spin_lock(&mca_bh_lock);
switch (mca_page_isolate(paddr)) {
@@ -442,21 +444,26 @@ recover_from_read_error(slidx_table_t *slidx,
if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate))
return 0;
psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);
+ psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr);
/*
* Check the privilege level of interrupted context.
* If it is user-mode, then terminate affected process.
*/
- if (psr1->cpl != 0) {
+
+ pmsa = sos->pal_min_state;
+ if (psr1->cpl != 0 ||
+ ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
smei = peidx_bus_check(peidx, 0);
if (smei->valid.target_identifier) {
/*
* setup for resume to bottom half of MCA,
* "mca_handler_bhhook"
*/
- pmsa = sos->pal_min_state;
- /* pass to bhhook as 1st argument (gr8) */
+ /* pass to bhhook as argument (gr8, ...) */
pmsa->pmsa_gr[8-1] = smei->target_identifier;
+ pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
+ pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
/* set interrupted return address (but no use) */
pmsa->pmsa_br0 = pmsa->pmsa_iip;
/* change resume address to bottom half */
@@ -466,6 +473,7 @@ recover_from_read_error(slidx_table_t *slidx,
psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
psr2->cpl = 0;
psr2->ri = 0;
+ psr2->bn = 1;
psr2->i = 0;
return 1;
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
index e2f6fa1e0ef6..31a2e52bb16f 100644
--- a/arch/ia64/kernel/mca_drv.h
+++ b/arch/ia64/kernel/mca_drv.h
@@ -111,3 +111,10 @@ typedef struct slidx_table {
slidx_foreach_entry(__pos, &((slidx)->sec)) { __count++; }\
__count; })
+struct mca_table_entry {
+ int start_addr; /* location-relative starting address of MCA recoverable range */
+ int end_addr; /* location-relative ending address of MCA recoverable range */
+};
+
+extern const struct mca_table_entry *search_mca_tables (unsigned long addr);
+extern int mca_recover_range(unsigned long);
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index 3f298ee4d00c..e6a580d354b9 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -14,15 +14,12 @@
GLOBAL_ENTRY(mca_handler_bhhook)
invala // clear RSE ?
- ;;
cover
;;
clrrrb
;;
- alloc r16=ar.pfs,0,2,1,0 // make a new frame
- ;;
+ alloc r16=ar.pfs,0,2,3,0 // make a new frame
mov ar.rsc=0
- ;;
mov r13=IA64_KR(CURRENT) // current task pointer
;;
mov r2=r13
@@ -30,7 +27,6 @@ GLOBAL_ENTRY(mca_handler_bhhook)
addl r22=IA64_RBS_OFFSET,r2
;;
mov ar.bspstore=r22
- ;;
addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
;;
adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
@@ -40,12 +36,12 @@ GLOBAL_ENTRY(mca_handler_bhhook)
movl loc1=mca_handler_bh // recovery C function
;;
mov out0=r8 // poisoned address
+ mov out1=r9 // iip
+ mov out2=r10 // psr
mov b6=loc1
;;
mov loc1=rp
- ;;
- ssm psr.i
- ;;
+ ssm psr.i | psr.ic
br.call.sptk.many rp=b6 // does not return ...
;;
mov ar.pfs=loc0
@@ -53,5 +49,4 @@ GLOBAL_ENTRY(mca_handler_bhhook)
;;
mov r8=r0
br.ret.sptk.many rp
- ;;
END(mca_handler_bhhook)
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 632d65cc0685..0b9e56dd7f05 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -130,6 +130,15 @@ SECTIONS
__initcall_end = .;
}
+ /* MCA table */
+ . = ALIGN(16);
+ __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
+ {
+ __start___mca_table = .;
+ *(__mca_table)
+ __stop___mca_table = .;
+ }
+
.data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
{
__start___vtop_patchlist = .;
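
The __mca_table output section added above is what gives mca.c its __start___mca_table/__stop___mca_table bounds. Below is a rough user-space analogue of that linker-delimited-table pattern, assuming GCC and GNU ld (ld synthesizes __start_<name>/__stop_<name> symbols for any referenced section whose name is a valid C identifier); the section and symbol names are invented for the illustration and are not the kernel's:

#include <stdio.h>

struct tag {
        const char *name;
};

/* drop one table entry into a dedicated section */
#define TAG(n)                                                  \
        static const struct tag tag_##n                         \
        __attribute__((used, section("demo_table"))) = { #n }

TAG(first);
TAG(second);

/* bounds provided by the linker, like __start___mca_table/__stop___mca_table */
extern const struct tag __start_demo_table[];
extern const struct tag __stop_demo_table[];

int main(void)
{
        const struct tag *t;

        for (t = __start_demo_table; t < __stop_demo_table; t++)
                printf("%s\n", t->name);
        return 0;
}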
diff --git a/include/asm-ia64/asmmacro.h b/include/asm-ia64/asmmacro.h
index 77af457f4ad7..d4cec32083d8 100644
--- a/include/asm-ia64/asmmacro.h
+++ b/include/asm-ia64/asmmacro.h
@@ -51,6 +51,17 @@ name:
[99:] x
/*
+ * Tag MCA recoverable instruction ranges.
+ */
+
+ .section "__mca_table", "a" // declare section & section attributes
+ .previous
+
+# define MCA_RECOVER_RANGE(y) \
+ .xdata4 "__mca_table", y-., 99f-.; \
+ [99:]
+
+/*
* Mark instructions that need a load of a virtual address patched to be
* a load of a physical address. We use this either in critical performance
* path (ivt.S - TLB miss processing) or in places where it might not be
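
MCA_RECOVER_RANGE(y) records two self-relative 32-bit words in __mca_table, bracketing the instructions between the label y and the local [99:] tag dropped at the expansion site; mca_recover_range() then asks whether the faulting ip falls between them. A loose C analogue of that tag-and-look-up idea, using GCC's labels-as-values extension in place of assembler labels, is sketched below; it is purely illustrative (compile without optimization so the labels keep their source order) and is not how the kernel emits the table:

#include <stdio.h>

static unsigned long range_start, range_end;    /* one hand-rolled "table entry" */

static int in_tagged_range(unsigned long ip)
{
        return ip >= range_start && ip <= range_end;
}

int main(void)
{
        unsigned long inside, outside;

        range_start = (unsigned long)&&region_begin;   /* role of the "y" label   */
        range_end   = (unsigned long)&&region_end;     /* role of the "[99:]" tag */
        inside      = (unsigned long)&&region_mid;
        outside     = (unsigned long)&&after;

region_begin:
region_mid:
        printf("inside:  %d\n", in_tagged_range(inside));      /* prints 1 */
region_end:
        printf("outside: %d\n", in_tagged_range(outside));     /* prints 0 */
after:
        return 0;
}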