Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--   arch/ia64/kernel/acpi.c           19
-rw-r--r--   arch/ia64/kernel/entry.S          26
-rw-r--r--   arch/ia64/kernel/init_task.c       1
-rw-r--r--   arch/ia64/kernel/iosapic.c         2
-rw-r--r--   arch/ia64/kernel/ivt.S            84
-rw-r--r--   arch/ia64/kernel/minstate.h       46
-rw-r--r--   arch/ia64/kernel/palinfo.c         6
-rw-r--r--   arch/ia64/kernel/patch.c          23
-rw-r--r--   arch/ia64/kernel/perfmon.c        26
-rw-r--r--   arch/ia64/kernel/process.c        25
-rw-r--r--   arch/ia64/kernel/sal.c            18
-rw-r--r--   arch/ia64/kernel/setup.c          46
-rw-r--r--   arch/ia64/kernel/time.c            1
-rw-r--r--   arch/ia64/kernel/vmlinux.lds.S     7
14 files changed, 240 insertions, 90 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 19709a079635..43687cc60dfb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -117,7 +117,10 @@ acpi_get_sysname(void)
if (!strcmp(hdr->oem_id, "HP")) {
return "hpzx1";
} else if (!strcmp(hdr->oem_id, "SGI")) {
- return "sn2";
+ if (!strcmp(hdr->oem_table_id + 4, "UV"))
+ return "uv";
+ else
+ return "sn2";
}
return "dig";
@@ -130,6 +133,8 @@ acpi_get_sysname(void)
return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
return "sn2";
+# elif defined (CONFIG_IA64_SGI_UV)
+ return "uv";
# elif defined (CONFIG_IA64_DIG)
return "dig";
# else
@@ -460,7 +465,6 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
printk(KERN_ERR
"ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
len, slit->header.length);
- memset(numa_slit, 10, sizeof(numa_slit));
return;
}
slit_table = slit;
@@ -569,8 +573,14 @@ void __init acpi_numa_arch_fixup(void)
printk(KERN_INFO "Number of memory chunks in system = %d\n",
num_node_memblks);
- if (!slit_table)
+ if (!slit_table) {
+ for (i = 0; i < MAX_NUMNODES; i++)
+ for (j = 0; j < MAX_NUMNODES; j++)
+ node_distance(i, j) = i == j ? LOCAL_DISTANCE :
+ REMOTE_DISTANCE;
return;
+ }
+
memset(numa_slit, -1, sizeof(numa_slit));
for (i = 0; i < slit_table->locality_count; i++) {
if (!pxm_bit_test(i))
@@ -622,6 +632,9 @@ void acpi_unregister_gsi(u32 gsi)
if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
return;
+ if (has_8259 && gsi < 16)
+ return;
+
iosapic_unregister_intr(gsi);
}
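When firmware supplies no SLIT, the fixup above now fills the distance matrix with the ACPI defaults instead of leaving it uninitialized. A minimal C model of that fallback, assuming the conventional Linux values LOCAL_DISTANCE == 10 and REMOTE_DISTANCE == 20 (the matrix size and function name are illustrative):

#define MAX_NUMNODES    16      /* illustrative, not the kernel's value */
#define LOCAL_DISTANCE  10      /* SLIT distance of a node to itself */
#define REMOTE_DISTANCE 20      /* default distance to any other node */

static unsigned char numa_slit[MAX_NUMNODES][MAX_NUMNODES];

static void default_node_distances(void)
{
        int i, j;

        for (i = 0; i < MAX_NUMNODES; i++)
                for (j = 0; j < MAX_NUMNODES; j++)
                        numa_slit[i][j] = (i == j) ? LOCAL_DISTANCE
                                                   : REMOTE_DISTANCE;
}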
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index e49ad8c5dc69..ca2bb95726de 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1156,6 +1156,9 @@ skip_rbs_switch:
* r31 = current->thread_info->flags
* On exit:
* p6 = TRUE if work-pending-check needs to be redone
+ *
+ * Interrupts are disabled on entry, reenabled depend on work, and
+ * disabled on exit.
*/
.work_pending_syscall:
add r2=-8,r2
@@ -1164,16 +1167,16 @@ skip_rbs_switch:
st8 [r2]=r8
st8 [r3]=r10
.work_pending:
- tbit.z p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
+ tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed?
(p6) br.cond.sptk.few .notify
#ifdef CONFIG_PREEMPT
(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
;;
(pKStk) st4 [r20]=r21
- ssm psr.i // enable interrupts
#endif
+ ssm psr.i // enable interrupts
br.call.spnt.many rp=schedule
-.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1
+.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
rsm psr.i // disable interrupts
;;
#ifdef CONFIG_PREEMPT
@@ -1182,13 +1185,13 @@ skip_rbs_switch:
(pKStk) st4 [r20]=r0 // preempt_count() <- 0
#endif
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
- br.cond.sptk.many .work_processed_kernel // re-check
+ br.cond.sptk.many .work_processed_kernel
.notify:
(pUStk) br.call.spnt.many rp=notify_resume_user
-.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0
+.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check)
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
- br.cond.sptk.many .work_processed_kernel // don't re-check
+ br.cond.sptk.many .work_processed_kernel
.work_pending_syscall_end:
adds r2=PT(R8)+16,r12
@@ -1196,7 +1199,7 @@ skip_rbs_switch:
;;
ld8 r8=[r2]
ld8 r10=[r3]
- br.cond.sptk.many .work_processed_syscall // re-check
+ br.cond.sptk.many .work_processed_syscall
END(ia64_leave_kernel)
@@ -1234,9 +1237,12 @@ GLOBAL_ENTRY(ia64_invoke_schedule_tail)
END(ia64_invoke_schedule_tail)
/*
- * Setup stack and call do_notify_resume_user(). Note that pSys and pNonSys need to
- * be set up by the caller. We declare 8 input registers so the system call
- * args get preserved, in case we need to restart a system call.
+ * Setup stack and call do_notify_resume_user(), keeping interrupts
+ * disabled.
+ *
+ * Note that pSys and pNonSys need to be set up by the caller.
+ * We declare 8 input registers so the system call args get preserved,
+ * in case we need to restart a system call.
*/
ENTRY(notify_resume_user)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
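For readers who do not speak ia64 assembly, a rough C model of the .work_pending contract documented above; need_resched() and work_pending() stand in for the TIF bit tests, and the sketch describes control flow only, not kernel code:

/* Interrupts are disabled on entry and on exit; they are enabled only
 * across schedule().  notify_resume_user() is entered with interrupts
 * still disabled (it may toggle them internally). */
static void work_pending_model(void)
{
        int recheck;

        do {
                if (need_resched()) {
                        local_irq_enable();     /* ssm psr.i */
                        schedule();
                        local_irq_disable();    /* rsm psr.i */
                        recheck = 1;            /* p6 <- 1 */
                } else {
                        notify_resume_user();   /* irqs stay off here */
                        recheck = 0;            /* p6 <- 0 */
                }
        } while (recheck && work_pending());
}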
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index bc8efcad28b8..9d7e1c66faf4 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -18,7 +18,6 @@
#include <asm/pgtable.h>
static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 082c31dcfd99..39752cdef6ff 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -558,8 +558,6 @@ static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
NR_PREALLOCATE_RTE_ENTRIES);
- if (!rte)
- return NULL;
for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
list_add(&rte->rte_list, &free_rte_list);
}
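The deleted NULL check was dead code: the boot-time allocator never returns NULL, it panics on exhaustion. A one-line sketch of the contract the change relies on:

/* alloc_bootmem() either returns usable (zeroed) memory or panics,
 * so the result may be dereferenced without a check. */
void *buf = alloc_bootmem(PAGE_SIZE);   /* never NULL on return */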
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 6678c49daba3..80b44ea052d7 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -1076,48 +1076,6 @@ END(ia64_syscall_setup)
DBG_FAULT(15)
FAULT(15)
- /*
- * Squatting in this space ...
- *
- * This special case dispatcher for illegal operation faults allows preserved
- * registers to be modified through a callback function (asm only) that is handed
- * back from the fault handler in r8. Up to three arguments can be passed to the
- * callback function by returning an aggregate with the callback as its first
- * element, followed by the arguments.
- */
-ENTRY(dispatch_illegal_op_fault)
- .prologue
- .body
- SAVE_MIN_WITH_COVER
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
-(p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- ;;
- alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
- mov out0=ar.ec
- ;;
- SAVE_REST
- PT_REGS_UNWIND_INFO(0)
- ;;
- br.call.sptk.many rp=ia64_illegal_op_fault
-.ret0: ;;
- alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
- mov out0=r9
- mov out1=r10
- mov out2=r11
- movl r15=ia64_leave_kernel
- ;;
- mov rp=r15
- mov b6=r8
- ;;
- cmp.ne p6,p0=0,r8
-(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
- br.sptk.many ia64_leave_kernel
-END(dispatch_illegal_op_fault)
-
.org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
@@ -1715,6 +1673,48 @@ END(ia32_interrupt)
DBG_FAULT(67)
FAULT(67)
+ /*
+ * Squatting in this space ...
+ *
+ * This special case dispatcher for illegal operation faults allows preserved
+ * registers to be modified through a callback function (asm only) that is handed
+ * back from the fault handler in r8. Up to three arguments can be passed to the
+ * callback function by returning an aggregate with the callback as its first
+ * element, followed by the arguments.
+ */
+ENTRY(dispatch_illegal_op_fault)
+ .prologue
+ .body
+ SAVE_MIN_WITH_COVER
+ ssm psr.ic | PSR_DEFAULT_BITS
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+(p15) ssm psr.i // restore psr.i
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ ;;
+ alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
+ mov out0=ar.ec
+ ;;
+ SAVE_REST
+ PT_REGS_UNWIND_INFO(0)
+ ;;
+ br.call.sptk.many rp=ia64_illegal_op_fault
+.ret0: ;;
+ alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
+ mov out0=r9
+ mov out1=r10
+ mov out2=r11
+ movl r15=ia64_leave_kernel
+ ;;
+ mov rp=r15
+ mov b6=r8
+ ;;
+ cmp.ne p6,p0=0,r8
+(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
+ br.sptk.many ia64_leave_kernel
+END(dispatch_illegal_op_fault)
+
#ifdef CONFIG_IA32_SUPPORT
/*
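The moved dispatcher's "aggregate with the callback as its first element" comes back from ia64_illegal_op_fault() in r8-r11, per the ia64 convention for small aggregate returns. Its C-side shape is roughly this (cf. struct illegal_op_return in arch/ia64/kernel/traps.c):

struct illegal_op_return {
        unsigned long fkt;      /* r8: asm callback, or 0 for none */
        unsigned long arg1;     /* r9  -> out0 */
        unsigned long arg2;     /* r10 -> out1 */
        unsigned long arg3;     /* r11 -> out2 */
};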
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 7c548ac52bbc..74b6d670aaef 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -15,6 +15,9 @@
#define ACCOUNT_SYS_ENTER
#endif
+.section ".data.patch.rse", "a"
+.previous
+
/*
* DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
* the minimum state necessary that allows us to turn psr.ic back
@@ -40,7 +43,7 @@
* Note that psr.ic is NOT turned on by this macro. This is so that
* we can pass interruption state as arguments to a handler.
*/
-#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND) \
mov r16=IA64_KR(CURRENT); /* M */ \
mov r27=ar.rsc; /* M */ \
mov r20=r1; /* A */ \
@@ -87,6 +90,7 @@
tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
mov r29=b0 \
;; \
+ WORKAROUND; \
adds r16=PT(R8),r1; /* initialize first base pointer */ \
adds r17=PT(R9),r1; /* initialize second base pointer */ \
(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
@@ -206,6 +210,40 @@
st8 [r25]=r10; /* ar.ssd */ \
;;
-#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
-#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
-#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, )
+#define RSE_WORKAROUND \
+(pUStk) extr.u r17=r18,3,6; \
+(pUStk) sub r16=r18,r22; \
+[1:](pKStk) br.cond.sptk.many 1f; \
+ .xdata4 ".data.patch.rse",1b-. \
+ ;; \
+ cmp.ge p6,p7 = 33,r17; \
+ ;; \
+(p6) mov r17=0x310; \
+(p7) mov r17=0x308; \
+ ;; \
+ cmp.leu p1,p0=r16,r17; \
+(p1) br.cond.sptk.many 1f; \
+ dep.z r17=r26,0,62; \
+ movl r16=2f; \
+ ;; \
+ mov ar.pfs=r17; \
+ dep r27=r0,r27,16,14; \
+ mov b0=r16; \
+ ;; \
+ br.ret.sptk b0; \
+ ;; \
+2: \
+ mov ar.rsc=r0 \
+ ;; \
+ flushrs; \
+ ;; \
+ mov ar.bspstore=r22 \
+ ;; \
+ mov r18=ar.bsp; \
+ ;; \
+1: \
+ .pred.rel "mutex", pKStk, pUStk
+
+#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND)
+#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
+#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , )
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 4547a2092af9..9dc00f7fe10e 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -900,12 +900,6 @@ static void
palinfo_smp_call(void *info)
{
palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
- if (data == NULL) {
- printk(KERN_ERR "palinfo: data pointer is NULL\n");
- data->ret = 0; /* no output */
- return;
- }
- /* does this actual call */
data->ret = (*data->func)(data->page);
}
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index e0dca8743dbb..b83b2c516008 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -115,6 +115,29 @@ ia64_patch_vtop (unsigned long start, unsigned long end)
ia64_srlz_i();
}
+/*
+ * Disable the RSE workaround by turning the conditional branch
+ * that we tagged in each place the workaround was used into an
+ * unconditional branch.
+ */
+void __init
+ia64_patch_rse (unsigned long start, unsigned long end)
+{
+ s32 *offp = (s32 *) start;
+ u64 ip, *b;
+
+ while (offp < (s32 *) end) {
+ ip = (u64) offp + *offp;
+
+ b = (u64 *)(ip & -16);
+ b[1] &= ~0xf800000L;
+ ia64_fc((void *) ip);
+ ++offp;
+ }
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+
void __init
ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
{
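Each .xdata4 entry in .data.patch.rse holds a 32-bit self-relative offset (tagged address minus entry address), so the loop above recovers the instruction address by adding the entry's own address back, rounds down to the containing 16-byte bundle with ip & -16, and clears bits in the second bundle word so the tagged branch executes unconditionally. A self-contained userspace C model of the self-relative encoding, assuming the offset fits in 32 bits (as it does for kernel text):

#include <stdint.h>
#include <stdio.h>

/* An entry stores (target - &entry); add &entry back to resolve it. */
static uintptr_t resolve(const int32_t *entry)
{
        return (uintptr_t)entry + *entry;
}

int main(void)
{
        char target;
        int32_t entry;

        entry = (int32_t)((uintptr_t)&target - (uintptr_t)&entry);
        printf("round trip ok: %d\n",
               resolve(&entry) == (uintptr_t)&target);
        return 0;
}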
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index c1ad27de2dd2..7714a97b0104 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1864,11 +1864,6 @@ pfm_flush(struct file *filp, fl_owner_t id)
* invoked after, it will find an empty queue and no
* signal will be sent. In both case, we are safe
*/
- if (filp->f_flags & FASYNC) {
- DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
- pfm_do_fasync (-1, filp, ctx, 0);
- }
-
PROTECT_CTX(ctx, flags);
state = ctx->ctx_state;
@@ -1999,6 +1994,11 @@ pfm_close(struct inode *inode, struct file *filp)
return -EBADF;
}
+ if (filp->f_flags & FASYNC) {
+ DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
+ pfm_do_fasync(-1, filp, ctx, 0);
+ }
+
PROTECT_CTX(ctx, flags);
state = ctx->ctx_state;
@@ -5013,12 +5013,13 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
}
static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
+
/*
* pfm_handle_work() can be called with interrupts enabled
* (TIF_NEED_RESCHED) or disabled. The down_interruptible
* call may sleep, therefore we must re-enable interrupts
* to avoid deadlocks. It is safe to do so because this function
- * is called ONLY when returning to user level (PUStk=1), in which case
+ * is called ONLY when returning to user level (pUStk=1), in which case
* there is no risk of kernel stack overflow due to deep
* interrupt nesting.
*/
@@ -5034,7 +5035,8 @@ pfm_handle_work(void)
ctx = PFM_GET_CTX(current);
if (ctx == NULL) {
- printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
+ printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
+ task_pid_nr(current));
return;
}
@@ -5058,11 +5060,12 @@ pfm_handle_work(void)
/*
* must be done before we check for simple-reset mode
*/
- if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
-
+ if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
+ goto do_zombie;
//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
- if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
+ if (reason == PFM_TRAP_REASON_RESET)
+ goto skip_blocking;
/*
* restore interrupt mask to what it was on entry.
@@ -5110,7 +5113,8 @@ do_zombie:
/*
* in case of interruption of down() we don't restart anything
*/
- if (ret < 0) goto nothing_to_do;
+ if (ret < 0)
+ goto nothing_to_do;
skip_blocking:
pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
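Moving the FASYNC cleanup from pfm_flush() to pfm_close() matters because ->flush runs on every close() of a possibly shared descriptor, while ->release runs exactly once, when the last reference to the file goes away. A hedged sketch of the usual release-side pattern (the example_* names are hypothetical):

static struct fasync_struct *example_async_queue;

static int example_release(struct inode *inode, struct file *filp)
{
        if (filp->f_flags & FASYNC)
                fasync_helper(-1, filp, 0, &example_async_queue);
        /* ... tear down the per-file context ... */
        return 0;
}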
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 58dcfac5ea88..a3a34b4eb038 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -167,11 +167,18 @@ void tsk_clear_notify_resume(struct task_struct *tsk)
clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME);
}
+/*
+ * do_notify_resume_user():
+ * Called from notify_resume_user in entry.S, with interrupts disabled.
+ */
void
-do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall)
+do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
{
if (fsys_mode(current, &scr->pt)) {
- /* defer signal-handling etc. until we return to privilege-level 0. */
+ /*
+ * defer signal-handling etc. until we return to
+ * privilege-level 0.
+ */
if (!ia64_psr(&scr->pt)->lp)
ia64_psr(&scr->pt)->lp = 1;
return;
@@ -179,16 +186,26 @@ do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall
#ifdef CONFIG_PERFMON
if (current->thread.pfm_needs_checking)
+ /*
+ * Note: pfm_handle_work() may be called with interrupts
+ * disabled, and may enable interrupts within the function.
+ */
pfm_handle_work();
#endif
/* deal with pending signal delivery */
- if (test_thread_flag(TIF_SIGPENDING))
+ if (test_thread_flag(TIF_SIGPENDING)) {
+ local_irq_enable(); /* force interrupt enable */
ia64_do_signal(scr, in_syscall);
+ }
/* copy user rbs to kernel rbs */
- if (unlikely(test_thread_flag(TIF_RESTORE_RSE)))
+ if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
+ local_irq_enable(); /* force interrupt enable */
ia64_sync_krbs();
+ }
+
+ local_irq_disable(); /* force interrupt disable */
}
static int pal_halt = 1;
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index a3022dc48ef8..0464173ea568 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -229,6 +229,14 @@ static void __init sal_desc_ap_wakeup(void *p) { }
*/
static int sal_cache_flush_drops_interrupts;
+static int __init
+force_pal_cache_flush(char *str)
+{
+ sal_cache_flush_drops_interrupts = 1;
+ return 0;
+}
+early_param("force_pal_cache_flush", force_pal_cache_flush);
+
void __init
check_sal_cache_flush (void)
{
@@ -237,15 +245,17 @@ check_sal_cache_flush (void)
u64 vector, cache_type = 3;
struct ia64_sal_retval isrv;
+ if (sal_cache_flush_drops_interrupts)
+ return;
+
cpu = get_cpu();
local_irq_save(flags);
/*
- * Schedule a timer interrupt, wait until it's reported, and see if
- * SAL_CACHE_FLUSH drops it.
+ * Send ourselves a timer interrupt, wait until it's reported, and see
+ * if SAL_CACHE_FLUSH drops it.
*/
- ia64_set_itv(IA64_TIMER_VECTOR);
- ia64_set_itm(ia64_get_itc() + 1000);
+ platform_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0);
while (!ia64_get_irr(IA64_TIMER_VECTOR))
cpu_relax();
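With the early_param() hook above, the probe can be skipped from the boot command line on machines where SAL_CACHE_FLUSH is already known to drop interrupts. For example, with elilo (the usual ia64 boot loader; the file layout is illustrative):

# /boot/efi/elilo.conf
image=vmlinuz
        label=linux
        append="force_pal_cache_flush"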
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 5015ca1275ca..632cda8f2e76 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -239,6 +239,25 @@ __initcall(register_memory);
#ifdef CONFIG_KEXEC
+
+/*
+ * This function checks if the reserved crashkernel is allowed on the specific
+ * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
+ * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
+ * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
+ * in kdump case. See the comment in sba_init() in sba_iommu.c.
+ *
+ * So, the only machvecs that really support loading the kdump kernel
+ * over 4 GB are "sn2" and "uv".
+ */
+static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
+{
+ if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
+ return 1;
+ else
+ return pbase < (1UL << 32);
+}
+
static void __init setup_crashkernel(unsigned long total, int *n)
{
unsigned long long base = 0, size = 0;
@@ -252,6 +271,16 @@ static void __init setup_crashkernel(unsigned long total, int *n)
base = kdump_find_rsvd_region(size,
rsvd_region, *n);
}
+
+ if (!check_crashkernel_memory(base, size)) {
+ pr_warning("crashkernel: There would be kdump memory "
+ "at %ld GB but this is unusable because it "
+ "must\nbe below 4 GB. Change the memory "
+ "configuration of the machine.\n",
+ (unsigned long)(base >> 30));
+ return;
+ }
+
if (base != ~0UL) {
printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
"for crashkernel (System RAM: %ldMB)\n",
@@ -518,7 +547,8 @@ setup_arch (char **cmdline_p)
# ifdef CONFIG_ACPI_NUMA
acpi_numa_init();
per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
- 32 : cpus_weight(early_cpu_possible_map)), additional_cpus);
+ 32 : cpus_weight(early_cpu_possible_map)),
+ additional_cpus > 0 ? additional_cpus : 0);
# endif
#else
# ifdef CONFIG_SMP
@@ -531,6 +561,17 @@ setup_arch (char **cmdline_p)
/* process SAL system table: */
ia64_sal_init(__va(efi.sal_systab));
+#ifdef CONFIG_ITANIUM
+ ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+#else
+ {
+ u64 num_phys_stacked;
+
+ if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
+ ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+ }
+#endif
+
#ifdef CONFIG_SMP
cpu_physical_id(0) = hard_smp_processor_id();
#endif
@@ -538,8 +579,6 @@ setup_arch (char **cmdline_p)
cpu_init(); /* initialize the bootstrap CPU */
mmu_context_init(); /* initialize context_id bitmap */
- check_sal_cache_flush();
-
#ifdef CONFIG_ACPI
acpi_boot_init();
#endif
@@ -567,6 +606,7 @@ setup_arch (char **cmdline_p)
ia64_mca_init();
platform_setup(cmdline_p);
+ check_sal_cache_flush();
paging_init();
}
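The new check constrains the usual crashkernel= reservation: on anything other than the "sn2" and "uv" machvecs, the region must sit below 4 GB so that the kdump kernel's swiotlb can use it. Illustrative command-line forms:

crashkernel=256M          # kernel picks a base via kdump_find_rsvd_region()
crashkernel=256M@1024M    # explicit base; rejected by the check above
                          # if it lies at or above 4 GB on non-sn2/uv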
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 8c73643f2d66..aad1b7b1fff9 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -117,6 +117,7 @@ void account_system_vtime(struct task_struct *tsk)
local_irq_restore(flags);
}
+EXPORT_SYMBOL_GPL(account_system_vtime);
/*
* Called from the timer interrupt handler to charge accumulated user time
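The export lets GPL modules that do their own context switching (a hypervisor module, say) flush accumulated system time. A minimal usage sketch; treat the header location as an assumption:

#include <linux/hardirq.h>      /* account_system_vtime() is declared
                                 * here under CONFIG_VIRT_CPU_ACCOUNTING */
#include <linux/module.h>
#include <linux/sched.h>

MODULE_LICENSE("GPL");          /* required: the export is _GPL */

static void charge_stime(struct task_struct *tsk)
{
        account_system_vtime(tsk);      /* charge accumulated stime now */
}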
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 80622acc95de..5929ab10a289 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -156,6 +156,13 @@ SECTIONS
__end___vtop_patchlist = .;
}
+ .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
+ {
+ __start___rse_patchlist = .;
+ *(.data.patch.rse)
+ __end___rse_patchlist = .;
+ }
+
.data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
{
__start___mckinley_e9_bundles = .;
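On the C side, the bounds of the new section appear as linker-defined symbols, declared like the other patch lists (cf. asm/sections.h) and handed to the patcher from setup_arch():

extern char __start___rse_patchlist[];
extern char __end___rse_patchlist[];

ia64_patch_rse((u64) __start___rse_patchlist,
               (u64) __end___rse_patchlist);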