-rw-r--r--  arch/ia64/Kconfig                                                          2
-rw-r--r--  arch/ia64/Makefile                                                         2
-rw-r--r--  arch/ia64/configs/generic_defconfig (renamed from arch/ia64/defconfig)     0
-rw-r--r--  arch/ia64/ia32/ia32_signal.c                                              13
-rw-r--r--  arch/ia64/kernel/iosapic.c                                                 4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c                                              134
-rw-r--r--  arch/ia64/kernel/kprobes.c                                                 5
-rw-r--r--  arch/ia64/kernel/msi_ia64.c                                                3
-rw-r--r--  arch/ia64/kernel/sal.c                                                     7
-rw-r--r--  arch/ia64/kernel/signal.c                                                 36
-rw-r--r--  include/asm-ia64/Kbuild                                                    2
-rw-r--r--  include/asm-ia64/hw_irq.h                                                 12
-rw-r--r--  include/asm-ia64/kprobes.h                                                 4
-rw-r--r--  include/asm-ia64/sal.h                                                     4
14 files changed, 180 insertions(+), 48 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 56762d3c2a6a..8fa3faf5ef1b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -156,6 +156,8 @@ config IA64_HP_ZX1_SWIOTLB
config IA64_SGI_SN2
bool "SGI-SN2"
+ select NUMA
+ select ACPI_NUMA
help
Selecting this option will optimize the kernel for use on sn2 based
systems, but the resulting kernel binary will not run on other
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index b916ccfdef84..f1645c4f7039 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -11,6 +11,8 @@
# Copyright (C) 1998-2004 by David Mosberger-Tang <davidm@hpl.hp.com>
#
+KBUILD_DEFCONFIG := generic_defconfig
+
NM := $(CROSS_COMPILE)nm -B
READELF := $(CROSS_COMPILE)readelf
diff --git a/arch/ia64/defconfig b/arch/ia64/configs/generic_defconfig
index 0210545e7f61..0210545e7f61 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/configs/generic_defconfig
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index 85e82f32e480..256a7faeda07 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -766,8 +766,19 @@ get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (!on_sig_stack(esp))
+ int onstack = sas_ss_flags(esp);
+
+ if (onstack == 0)
esp = current->sas_ss_sp + current->sas_ss_size;
+ else if (onstack == SS_ONSTACK) {
+ /*
+ * If we are on the alternate signal stack and would
+ * overflow it, don't. Return an always-bogus address
+ * instead so we will die with SIGSEGV.
+ */
+ if (!likely(on_sig_stack(esp - frame_size)))
+ return (void __user *) -1L;
+ }
}
/* Legacy stack switching not supported */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 398e2fd1cd25..7b3292282dea 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -345,7 +345,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
if (cpus_empty(mask))
return;
- if (reassign_irq_vector(irq, first_cpu(mask)))
+ if (irq_prepare_move(irq, first_cpu(mask)))
return;
dest = cpu_physical_id(first_cpu(mask));
@@ -397,6 +397,7 @@ iosapic_end_level_irq (unsigned int irq)
struct iosapic_rte_info *rte;
int do_unmask_irq = 0;
+ irq_complete_move(irq);
if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
do_unmask_irq = 1;
mask_irq(irq);
@@ -450,6 +451,7 @@ iosapic_ack_edge_irq (unsigned int irq)
{
irq_desc_t *idesc = irq_desc + irq;
+ irq_complete_move(irq);
move_native_irq(irq);
/*
* Once we have recorded IRQ_PENDING already, we can mask the
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 0b52f19ed046..2b8cf6e85af4 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,6 +260,8 @@ void __setup_vector_irq(int cpu)
}
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+#define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR
+
static enum vector_domain_type {
VECTOR_DOMAIN_NONE,
VECTOR_DOMAIN_PERCPU
@@ -272,6 +274,101 @@ static cpumask_t vector_allocation_domain(int cpu)
return CPU_MASK_ALL;
}
+static int __irq_prepare_move(int irq, int cpu)
+{
+ struct irq_cfg *cfg = &irq_cfg[irq];
+ int vector;
+ cpumask_t domain;
+
+ if (cfg->move_in_progress || cfg->move_cleanup_count)
+ return -EBUSY;
+ if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+ return -EINVAL;
+ if (cpu_isset(cpu, cfg->domain))
+ return 0;
+ domain = vector_allocation_domain(cpu);
+ vector = find_unassigned_vector(domain);
+ if (vector < 0)
+ return -ENOSPC;
+ cfg->move_in_progress = 1;
+ cfg->old_domain = cfg->domain;
+ cfg->vector = IRQ_VECTOR_UNASSIGNED;
+ cfg->domain = CPU_MASK_NONE;
+ BUG_ON(__bind_irq_vector(irq, vector, domain));
+ return 0;
+}
+
+int irq_prepare_move(int irq, int cpu)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ ret = __irq_prepare_move(irq, cpu);
+ spin_unlock_irqrestore(&vector_lock, flags);
+ return ret;
+}
+
+void irq_complete_move(unsigned irq)
+{
+ struct irq_cfg *cfg = &irq_cfg[irq];
+ cpumask_t cleanup_mask;
+ int i;
+
+ if (likely(!cfg->move_in_progress))
+ return;
+
+ if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+ return;
+
+ cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+ cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+ for_each_cpu_mask(i, cleanup_mask)
+ platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
+ cfg->move_in_progress = 0;
+}
+
+static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
+{
+ int me = smp_processor_id();
+ ia64_vector vector;
+ unsigned long flags;
+
+ for (vector = IA64_FIRST_DEVICE_VECTOR;
+ vector < IA64_LAST_DEVICE_VECTOR; vector++) {
+ int irq;
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+ irq = __get_cpu_var(vector_irq)[vector];
+ if (irq < 0)
+ continue;
+
+ desc = irq_desc + irq;
+ cfg = irq_cfg + irq;
+ spin_lock(&desc->lock);
+ if (!cfg->move_cleanup_count)
+ goto unlock;
+
+ if (!cpu_isset(me, cfg->old_domain))
+ goto unlock;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ __get_cpu_var(vector_irq)[vector] = -1;
+ cpu_clear(me, vector_table[vector]);
+ spin_unlock_irqrestore(&vector_lock, flags);
+ cfg->move_cleanup_count--;
+ unlock:
+ spin_unlock(&desc->lock);
+ }
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq_move_irqaction = {
+ .handler = smp_irq_move_cleanup_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "irq_move"
+};
+
static int __init parse_vector_domain(char *arg)
{
if (!arg)
@@ -303,36 +400,6 @@ void destroy_and_reserve_irq(unsigned int irq)
spin_unlock_irqrestore(&vector_lock, flags);
}
-static int __reassign_irq_vector(int irq, int cpu)
-{
- struct irq_cfg *cfg = &irq_cfg[irq];
- int vector;
- cpumask_t domain;
-
- if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
- return -EINVAL;
- if (cpu_isset(cpu, cfg->domain))
- return 0;
- domain = vector_allocation_domain(cpu);
- vector = find_unassigned_vector(domain);
- if (vector < 0)
- return -ENOSPC;
- __clear_irq_vector(irq);
- BUG_ON(__bind_irq_vector(irq, vector, domain));
- return 0;
-}
-
-int reassign_irq_vector(int irq, int cpu)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&vector_lock, flags);
- ret = __reassign_irq_vector(irq, cpu);
- spin_unlock_irqrestore(&vector_lock, flags);
- return ret;
-}
-
/*
* Dynamic irq allocate and deallocation for MSI
*/
@@ -578,6 +645,13 @@ init_IRQ (void)
register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
+ if (vector_domain_type != VECTOR_DOMAIN_NONE) {
+ BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
+ IA64_FIRST_DEVICE_VECTOR++;
+ register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
+ }
+#endif
#endif
#ifdef CONFIG_PERFMON
pfm_init_percpu();
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index b618487cdc85..615c3d2b6348 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -1001,6 +1001,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
+/* ia64 does not need this */
+void __kprobes jprobe_return(void)
+{
+}
+
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index e86d02959794..60c6ef67ebb2 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -57,7 +57,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
if (!cpu_online(cpu))
return;
- if (reassign_irq_vector(irq, cpu))
+ if (irq_prepare_move(irq, cpu))
return;
read_msi_msg(irq, &msg);
@@ -119,6 +119,7 @@ void ia64_teardown_msi_irq(unsigned int irq)
static void ia64_ack_msi_irq(unsigned int irq)
{
+ irq_complete_move(irq);
move_native_irq(irq);
ia64_eoi();
}
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index f44fe8412162..a3022dc48ef8 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -109,6 +109,13 @@ check_versions (struct ia64_sal_systab *systab)
sal_revision = SAL_VERSION_CODE(2, 8);
sal_version = SAL_VERSION_CODE(0, 0);
}
+
+ if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9)))
+ /*
+ * SGI Altix has hard-coded version 2.9 in their prom
+ * but they actually implement 3.2, so let's fix it here.
+ */
+ sal_revision = SAL_VERSION_CODE(3, 2);
}
static void __init
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 309da3567bc8..5740296c35af 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -342,15 +342,33 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
new_sp = scr->pt.r12;
tramp_addr = (unsigned long) __kernel_sigtramp;
- if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) {
- new_sp = current->sas_ss_sp + current->sas_ss_size;
- /*
- * We need to check for the register stack being on the signal stack
- * separately, because it's switched separately (memory stack is switched
- * in the kernel, register stack is switched in the signal trampoline).
- */
- if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
- new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ int onstack = sas_ss_flags(new_sp);
+
+ if (onstack == 0) {
+ new_sp = current->sas_ss_sp + current->sas_ss_size;
+ /*
+ * We need to check for the register stack being on the
+ * signal stack separately, because it's switched
+ * separately (memory stack is switched in the kernel,
+ * register stack is switched in the signal trampoline).
+ */
+ if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
+ new_rbs = ALIGN(current->sas_ss_sp,
+ sizeof(long));
+ } else if (onstack == SS_ONSTACK) {
+ unsigned long check_sp;
+
+ /*
+ * If we are on the alternate signal stack and would
+ * overflow it, don't. Return an always-bogus address
+ * instead so we will die with SIGSEGV.
+ */
+ check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
+ if (!likely(on_sig_stack(check_sp)))
+ return force_sigsegv_info(sig, (void __user *)
+ check_sp);
+ }
}
frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
index 4a1e48b9f403..eb24a3f47caa 100644
--- a/include/asm-ia64/Kbuild
+++ b/include/asm-ia64/Kbuild
@@ -3,7 +3,6 @@ include include/asm-generic/Kbuild.asm
header-y += break.h
header-y += fpu.h
header-y += fpswa.h
-header-y += gcc_intrin.h
header-y += ia64regs.h
header-y += intel_intrin.h
header-y += intrinsics.h
@@ -12,5 +11,6 @@ header-y += ptrace_offsets.h
header-y += rse.h
header-y += ucontext.h
+unifdef-y += gcc_intrin.h
unifdef-y += perfmon.h
unifdef-y += ustack.h
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index 7e6e3779670a..76366dc9c1a0 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -93,6 +93,9 @@ extern __u8 isa_irq_to_vector_map[16];
struct irq_cfg {
ia64_vector vector;
cpumask_t domain;
+ cpumask_t old_domain;
+ unsigned move_cleanup_count;
+ u8 move_in_progress : 1;
};
extern spinlock_t vector_lock;
extern struct irq_cfg irq_cfg[NR_IRQS];
@@ -106,12 +109,19 @@ extern int assign_irq_vector (int irq); /* allocate a free vector */
extern void free_irq_vector (int vector);
extern int reserve_irq_vector (int vector);
extern void __setup_vector_irq(int cpu);
-extern int reassign_irq_vector(int irq, int cpu);
extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
extern int check_irq_used (int irq);
extern void destroy_and_reserve_irq (unsigned int irq);
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+extern int irq_prepare_move(int irq, int cpu);
+extern void irq_complete_move(unsigned int irq);
+#else
+static inline int irq_prepare_move(int irq, int cpu) { return 0; }
+static inline void irq_complete_move(unsigned int irq) {}
+#endif
+
static inline void ia64_resend_irq(unsigned int vector)
{
platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index adbaba14eb0a..8233b3a964c6 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -121,10 +121,6 @@ extern int kprobes_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
-/* ia64 does not need this */
-static inline void jprobe_return(void)
-{
-}
extern void invalidate_stacked_regs(void);
extern void flush_register_stack(void);
extern void arch_remove_kprobe(struct kprobe *p);
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 2251118894ae..f4904db3b057 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -807,6 +807,10 @@ static inline s64
ia64_sal_physical_id_info(u16 *splid)
{
struct ia64_sal_retval isrv;
+
+ if (sal_revision < SAL_VERSION_CODE(3,2))
+ return -1;
+
SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
if (splid)
*splid = isrv.v0;