Diffstat (limited to 'arch/ia64/kernel')

 arch/ia64/kernel/acpi.c        |  4
 arch/ia64/kernel/entry.S       |  2
 arch/ia64/kernel/iosapic.c     |  4
 arch/ia64/kernel/irq.c         |  4
 arch/ia64/kernel/irq_ia64.c    | 12
 arch/ia64/kernel/kprobes.c     |  2
 arch/ia64/kernel/msi_ia64.c    |  4
 arch/ia64/kernel/smpboot.c     |  5
 arch/ia64/kernel/sys_ia64.c    |  2
 arch/ia64/kernel/unaligned.c   |  6
 arch/ia64/kernel/unwind.c      |  2
 arch/ia64/kernel/vmlinux.lds.S |  1
 12 files changed, 31 insertions(+), 17 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index d541671caf4a..bdef2ce38c8b 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -199,6 +199,10 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
return __va(phys_addr);
}
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+}
+
/* --------------------------------------------------------------------------
Boot-time Table Parsing
-------------------------------------------------------------------------- */
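The new __acpi_unmap_table() is deliberately empty: the matching __acpi_map_table() just above returns __va(phys_addr), an address in the kernel's permanent linear mapping, so there is nothing to tear down once a caller is done with a table. A small illustrative sketch (hypothetical helper names, not the ACPI core) of the map/use/unmap pairing the stub exists to satisfy:

#include <stdio.h>

/* Hypothetical stand-ins for the arch hooks; the ia64 unmap is a no-op. */
static char *map_table(unsigned long phys, unsigned long size)
{
	(void)size;
	return (char *)phys;          /* pretend phys is already addressable */
}

static void unmap_table(char *virt, unsigned long size)
{
	(void)virt; (void)size;       /* nothing was set up, nothing to undo */
}

int main(void)
{
	static char fake_table[16] = "ACPI";
	char *p = map_table((unsigned long)fake_table, sizeof(fake_table));

	printf("signature: %.4s\n", p);
	unmap_table(p, sizeof(fake_table));
	return 0;
}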
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index d435f4a7a96c..e5341e2c1175 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1442,7 +1442,7 @@ sys_call_table:
data8 sys_mkdir // 1055
data8 sys_rmdir
data8 sys_dup
- data8 sys_pipe
+ data8 sys_ia64_pipe
data8 sys_times
data8 ia64_brk // 1060
data8 sys_setgid
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 5cfd3d91001a..166e0d839fa0 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -507,7 +507,7 @@ static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
if (trigger == IOSAPIC_EDGE)
return -EINVAL;
- for (i = 0; i <= NR_IRQS; i++) {
+ for (i = 0; i < NR_IRQS; i++) {
info = &iosapic_intr_info[i];
if (info->trigger == trigger && info->polarity == pol &&
(info->dmode == IOSAPIC_FIXED ||
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
/* Clear affinity */
- cpus_setall(idesc->affinity);
+ cpumask_setall(idesc->affinity);
#endif
/* Clear the interrupt information */
iosapic_intr_info[irq].dest = 0;
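The loop-bound change in iosapic_find_sharable_irq() is an off-by-one fix: iosapic_intr_info[] has NR_IRQS entries, so the last valid index is NR_IRQS - 1 and the old "i <= NR_IRQS" test read one element past the end of the array. A minimal standalone illustration (made-up array, not the kernel structures):

#include <stdio.h>

#define NR_ENTRIES 4                 /* stand-in for NR_IRQS */

static int table[NR_ENTRIES];        /* valid indices: 0 .. NR_ENTRIES - 1 */

int main(void)
{
	int i, visited = 0;

	/* Corrected bound: touches exactly NR_ENTRIES elements. */
	for (i = 0; i < NR_ENTRIES; i++)
		visited += (table[i] == 0);

	printf("visited %d of %d entries\n", visited, NR_ENTRIES);

	/*
	 * The previous "i <= NR_ENTRIES" bound would also have read
	 * table[NR_ENTRIES], one element past the end of the array,
	 * which is undefined behavior.  The unwind.c hunk further down
	 * fixes the same pattern for unw.hash[UNW_HASH_SIZE].
	 */
	return 0;
}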
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index a58f64ca9f0e..226233a6fa19 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
if (irq < NR_IRQS) {
- cpumask_copy(&irq_desc[irq].affinity,
+ cpumask_copy(irq_desc[irq].affinity,
cpumask_of(cpu_logical_id(hwid)));
irq_redir[irq] = (char) (redir & 0xff);
}
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
if (desc->status == IRQ_PER_CPU)
continue;
- if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+ if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
>= nr_cpu_ids) {
/*
* Save it for phase 2 processing
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 28d3d483db92..927ad027820c 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -493,11 +493,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
ia64_srlz_d();
while (vector != IA64_SPURIOUS_INT_VECTOR) {
+ struct irq_desc *desc = irq_to_desc(vector);
+
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
smp_local_flush_tlb();
- kstat_this_cpu.irqs[vector]++;
+ kstat_incr_irqs_this_cpu(vector, desc);
} else if (unlikely(IS_RESCHEDULE(vector)))
- kstat_this_cpu.irqs[vector]++;
+ kstat_incr_irqs_this_cpu(vector, desc);
else {
int irq = local_vector_to_irq(vector);
@@ -551,11 +553,13 @@ void ia64_process_pending_intr(void)
* Perform normal interrupt style processing
*/
while (vector != IA64_SPURIOUS_INT_VECTOR) {
+ struct irq_desc *desc = irq_to_desc(vector);
+
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
smp_local_flush_tlb();
- kstat_this_cpu.irqs[vector]++;
+ kstat_incr_irqs_this_cpu(vector, desc);
} else if (unlikely(IS_RESCHEDULE(vector)))
- kstat_this_cpu.irqs[vector]++;
+ kstat_incr_irqs_this_cpu(vector, desc);
else {
struct pt_regs *old_regs = set_irq_regs(NULL);
int irq = local_vector_to_irq(vector);
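Both hunks replace the open-coded kstat_this_cpu.irqs[vector]++ with kstat_incr_irqs_this_cpu(vector, desc), doing the per-CPU accounting through the interrupt descriptor looked up at the top of the loop; the counter's storage is then owned by the descriptor rather than a fixed global array. A rough userspace analogue (invented names and an explicit cpu argument, not the kernel's kstat implementation):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

/* Per-interrupt descriptor carrying its own statistics. */
struct irq_desc {
	unsigned int irq;
	unsigned long *counts;   /* one slot per CPU, allocated on demand */
};

/* The helper hides where the counter lives; callers only name irq + desc. */
static void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc,
				     int this_cpu)
{
	(void)irq;               /* kept only to mirror the caller's arguments */
	desc->counts[this_cpu]++;
}

int main(void)
{
	struct irq_desc timer = {
		.irq = 0,
		.counts = calloc(NR_CPUS, sizeof(unsigned long)),
	};

	kstat_incr_irqs_this_cpu(timer.irq, &timer, 0);
	kstat_incr_irqs_this_cpu(timer.irq, &timer, 0);
	printf("irq %u fired %lu times on cpu 0\n", timer.irq, timer.counts[0]);

	free(timer.counts);
	return 0;
}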
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f90be51b1123..9adac441ac9b 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -870,7 +870,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
return 1;
ss_probe:
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
ia64_psr(regs)->ri = p->ainsn.slot;
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 890339339035..dcb6b7c51ea7 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
msg.data = data;
write_msi_msg(irq, &msg);
- irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+ cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
}
#endif /* CONFIG_SMP */
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
dmar_msi_write(irq, &msg);
- irq_desc[irq].affinity = *mask;
+ cpumask_copy(irq_desc[irq].affinity, mask);
}
#endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 11463994a7d5..52290547c85b 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -736,14 +736,15 @@ int __cpu_disable(void)
return -EBUSY;
}
+ cpu_clear(cpu, cpu_online_map);
+
if (migrate_platform_irqs(cpu)) {
cpu_set(cpu, cpu_online_map);
- return (-EBUSY);
+ return -EBUSY;
}
remove_siblinginfo(cpu);
fixup_irqs();
- cpu_clear(cpu, cpu_online_map);
local_flush_tlb_all();
cpu_clear(cpu, cpu_callin_map);
return 0;
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index bcbb6d8792d3..92ed83f34036 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -154,7 +154,7 @@ out:
* and r9) as this is faster than doing a copy_to_user().
*/
asmlinkage long
-sys_pipe (void)
+sys_ia64_pipe (void)
{
struct pt_regs *regs = task_pt_regs(current);
int fd[2];
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index ff0e7c10faa7..6db08599ebbc 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -59,6 +59,7 @@ dump (const char *str, void *vp, size_t len)
* (i.e. don't allow attacker to fill up logs with unaligned accesses).
*/
int no_unaligned_warning;
+int unaligned_dump_stack;
static int noprint_warning;
/*
@@ -1371,9 +1372,12 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
}
}
} else {
- if (within_logging_rate_limit())
+ if (within_logging_rate_limit()) {
printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
ifa, regs->cr_iip + ipsr->ri);
+ if (unaligned_dump_stack)
+ dump_stack();
+ }
set_fs(KERNEL_DS);
}
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index 67810b77d998..b6c0e63a0bf6 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -2149,7 +2149,7 @@ unw_remove_unwind_table (void *handle)
/* next, remove hash table entries for this table */
- for (index = 0; index <= UNW_HASH_SIZE; ++index) {
+ for (index = 0; index < UNW_HASH_SIZE; ++index) {
tmp = unw.cache + unw.hash[index];
if (unw.hash[index] >= UNW_CACHE_SIZE
|| tmp->ip < table->start || tmp->ip >= table->end)
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 10a7d47e8510..f45e4e508eca 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -219,6 +219,7 @@ SECTIONS
.data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
{
__per_cpu_start = .;
+ *(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;
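The linker-script addition pulls .data.percpu.page_aligned into the per-cpu output section and lists it first, presumably so the most strictly aligned per-cpu objects sit at the start of the area instead of forcing alignment padding in the middle of it. The effect of placing the most-aligned object first can be seen with plain struct layout; a purely illustrative C program (not kernel code):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE  4096
#define SMALL_SIZE 64

/* Page-aligned member last: the compiler inserts padding before it. */
struct aligned_last {
	char small[SMALL_SIZE];
	char page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
};

/* Page-aligned member first: the small data packs in right behind it. */
struct aligned_first {
	char page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
	char small[SMALL_SIZE];
};

int main(void)
{
	printf("page-aligned member last:  %zu padding bytes before it\n",
	       offsetof(struct aligned_last, page) - SMALL_SIZE);
	printf("page-aligned member first: %zu padding bytes after it\n",
	       offsetof(struct aligned_first, small) - PAGE_SIZE);
	return 0;
}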