author     Linus Torvalds <torvalds@g5.osdl.org>  2006-04-11 06:40:17 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-04-11 06:40:17 -0700
commit     b3967dc566bc89df19e9aeb87b2fd483418b02e6 (patch)
tree       97a0fff4c1d59e3395f6b6f9d4a226da3bf58d28 /arch/ia64
parent     cde227afe6b997dce08bcfc2aa6e373fb56857b0 (diff)
parent     0ffe984917b9cd6ecc19ffbc06f35869d8c18df8 (diff)
download   blackbird-op-linux-b3967dc566bc89df19e9aeb87b2fd483418b02e6.tar.gz
           blackbird-op-linux-b3967dc566bc89df19e9aeb87b2fd483418b02e6.zip
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Prefetch mmap_sem in ia64_do_page_fault()
  [IA64] Failure to resume after INIT in user space
  [IA64] Pass more data to the MCA/INIT notify_die hooks
  [IA64] always map VGA framebuffer UC, even if it supports WB
  [IA64] fix bug in ia64 __mutex_fastpath_trylock
  [IA64] for_each_possible_cpu: ia64
  [IA64] update HP CSR space discovery via ACPI
  [IA64] Wire up new syscalls {set,get}_robust_list
  [IA64] 'msg' may be used uninitialized in xpc_initiate_allocate()
  [IA64] Wire up new syscall sync_file_range()
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/kernel/acpi-ext.c        143
-rw-r--r--  arch/ia64/kernel/entry.S             3
-rw-r--r--  arch/ia64/kernel/mca.c              33
-rw-r--r--  arch/ia64/kernel/mca_asm.S          10
-rw-r--r--  arch/ia64/kernel/module.c            2
-rw-r--r--  arch/ia64/mm/fault.c                 3
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c    2
7 files changed, 105 insertions, 91 deletions
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 4a5574ff007b..fff82929d225 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -1,105 +1,104 @@
/*
- * arch/ia64/kernel/acpi-ext.c
+ * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
+ * Alex Williamson <alex.williamson@hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
*
- * Copyright (C) 2003 Hewlett-Packard
- * Copyright (C) Alex Williamson
- * Copyright (C) Bjorn Helgaas
- *
- * Vendor specific extensions to ACPI.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/acpi.h>
-#include <linux/efi.h>
#include <asm/acpi-ext.h>
-struct acpi_vendor_descriptor {
- u8 guid_id;
- efi_guid_t guid;
-};
+/*
+ * Device CSRs that do not appear in PCI config space should be described
+ * via ACPI. This would normally be done with Address Space Descriptors
+ * marked as "consumer-only," but old versions of Windows and Linux ignore
+ * the producer/consumer flag, so HP invented a vendor-defined resource to
+ * describe the location and size of CSR space.
+ */
-struct acpi_vendor_info {
- struct acpi_vendor_descriptor *descriptor;
- u8 *data;
- u32 length;
+struct acpi_vendor_uuid hp_ccsr_uuid = {
+ .subtype = 2,
+ .data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
+ 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
};
-acpi_status
-acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
+static acpi_status hp_ccsr_locate(acpi_handle obj, u64 *base, u64 *length)
{
- struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
- struct acpi_resource_vendor *vendor;
- struct acpi_vendor_descriptor *descriptor;
- u32 byte_length;
-
- if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
- return AE_OK;
-
- vendor = (struct acpi_resource_vendor *)&resource->data;
- descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
- if (vendor->byte_length <= sizeof(*info->descriptor) ||
- descriptor->guid_id != info->descriptor->guid_id ||
- efi_guidcmp(descriptor->guid, info->descriptor->guid))
- return AE_OK;
-
- byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
- info->data = acpi_os_allocate(byte_length);
- if (!info->data)
- return AE_NO_MEMORY;
-
- memcpy(info->data,
- vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
- byte_length);
- info->length = byte_length;
- return AE_CTRL_TERMINATE;
-}
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_resource *resource;
+ struct acpi_resource_vendor_typed *vendor;
-acpi_status
-acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
- u8 ** data, u32 * byte_length)
-{
- struct acpi_vendor_info info;
+ status = acpi_get_vendor_resource(obj, METHOD_NAME__CRS, &hp_ccsr_uuid,
+ &buffer);
- info.descriptor = id;
- info.data = NULL;
+ resource = buffer.pointer;
+ vendor = &resource->data.vendor_typed;
- acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match,
- &info);
- if (!info.data)
- return AE_NOT_FOUND;
+ if (ACPI_FAILURE(status) || vendor->byte_length < 16) {
+ status = AE_NOT_FOUND;
+ goto exit;
+ }
- *data = info.data;
- *byte_length = info.length;
- return AE_OK;
+ memcpy(base, vendor->byte_data, sizeof(*base));
+ memcpy(length, vendor->byte_data + 8, sizeof(*length));
+
+ exit:
+ acpi_os_free(buffer.pointer);
+ return status;
}
-struct acpi_vendor_descriptor hp_ccsr_descriptor = {
- .guid_id = 2,
- .guid =
- EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01,
- 0x37, 0x0e, 0xad)
+struct csr_space {
+ u64 base;
+ u64 length;
};
-acpi_status hp_acpi_csr_space(acpi_handle obj, u64 * csr_base, u64 * csr_length)
+static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
{
+ struct csr_space *space = data;
+ struct acpi_resource_address64 addr;
acpi_status status;
- u8 *data;
- u32 length;
- status =
- acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
+ status = acpi_resource_to_address64(resource, &addr);
+ if (ACPI_SUCCESS(status) &&
+ addr.resource_type == ACPI_MEMORY_RANGE &&
+ addr.address_length &&
+ addr.producer_consumer == ACPI_CONSUMER) {
+ space->base = addr.minimum;
+ space->length = addr.address_length;
+ return AE_CTRL_TERMINATE;
+ }
+ return AE_OK; /* keep looking */
+}
- if (ACPI_FAILURE(status) || length != 16)
- return AE_NOT_FOUND;
+static acpi_status hp_crs_locate(acpi_handle obj, u64 *base, u64 *length)
+{
+ struct csr_space space = { 0, 0 };
- memcpy(csr_base, data, sizeof(*csr_base));
- memcpy(csr_length, data + 8, sizeof(*csr_length));
- acpi_os_free(data);
+ acpi_walk_resources(obj, METHOD_NAME__CRS, find_csr_space, &space);
+ if (!space.length)
+ return AE_NOT_FOUND;
+ *base = space.base;
+ *length = space.length;
return AE_OK;
}
+acpi_status hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
+{
+ acpi_status status;
+
+ status = hp_ccsr_locate(obj, csr_base, csr_length);
+ if (ACPI_SUCCESS(status))
+ return status;
+
+ return hp_crs_locate(obj, csr_base, csr_length);
+}
EXPORT_SYMBOL(hp_acpi_csr_space);
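For orientation, the rewritten file keeps the original exported interface: hp_acpi_csr_space() now tries the HP vendor-defined CCSR resource first and falls back to scanning plain _CRS address descriptors. A minimal caller sketch follows (hypothetical driver code, not part of this patch; the ioremap step is an assumption about how the returned range would typically be used):

/* Hypothetical consumer of hp_acpi_csr_space(); not part of this patch. */
#include <linux/acpi.h>
#include <asm/acpi-ext.h>
#include <asm/io.h>

static void __iomem *map_hp_device_csrs(acpi_handle handle)
{
	u64 base, length;

	/* Tries the CCSR vendor resource first, then falls back to _CRS. */
	if (ACPI_FAILURE(hp_acpi_csr_space(handle, &base, &length)))
		return NULL;

	/* Assumption: CSR space is mapped for register-style access. */
	return ioremap(base, length);
}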
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 750e8e7fbdc3..6e16f6b35bd3 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1606,5 +1606,8 @@ sys_call_table:
data8 sys_ni_syscall // 1295 reserved for ppoll
data8 sys_unshare
data8 sys_splice
+ data8 sys_set_robust_list
+ data8 sys_get_robust_list
+ data8 sys_sync_file_range // 1300
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
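The three new entries extend the ia64 syscall table; the "// 1300" annotation places sync_file_range at 1300, so set_robust_list and get_robust_list land at 1298 and 1299. A user-space sketch, assuming no libc wrapper exists yet and that the generic SYNC_FILE_RANGE_WRITE flag value applies:

/* Hypothetical user-space invocation; syscall number taken from the table comment above. */
#include <unistd.h>
#include <sys/syscall.h>

#define NR_sync_file_range_ia64	1300	/* from the "// 1300" annotation */
#define SYNC_FILE_RANGE_WRITE	2	/* assumption: generic flag value */

static long flush_file_range(int fd, long long offset, long long nbytes)
{
	return syscall(NR_sync_file_range_ia64, fd, offset, nbytes,
		       SYNC_FILE_RANGE_WRITE);
}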
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8963171788d5..5e6fdbe78bcd 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -581,10 +581,12 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
{
unsigned long flags;
int cpu = smp_processor_id();
+ struct ia64_mca_notify_die nd =
+ { .sos = NULL, .monarch_cpu = &monarch_cpu };
/* Mask all interrupts */
local_irq_save(flags);
- if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, 0, 0, 0)
+ if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -594,7 +596,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
*/
ia64_sal_mc_rendez();
- if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, 0, 0, 0)
+ if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -602,7 +604,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
while (monarch_cpu != -1)
cpu_relax(); /* spin until monarch leaves */
- if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, 0, 0, 0)
+ if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -1023,6 +1025,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
&sos->proc_state_param;
int recover, cpu = smp_processor_id();
task_t *previous_current;
+ struct ia64_mca_notify_die nd =
+ { .sos = sos, .monarch_cpu = &monarch_cpu };
oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */
console_loglevel = 15; /* make sure printks make it to console */
@@ -1031,7 +1035,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
monarch_cpu = cpu;
- if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, 0, 0, 0)
+ if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
ia64_wait_for_slaves(cpu);
@@ -1043,7 +1047,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
* spinning in SAL does not work.
*/
ia64_mca_wakeup_all();
- if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, 0, 0, 0)
+ if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -1064,7 +1068,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
sos->os_status = IA64_MCA_CORRECTED;
}
- if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, 0, 0, recover)
+ if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -1351,10 +1355,14 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
static atomic_t monarchs;
task_t *previous_current;
int cpu = smp_processor_id();
+ struct ia64_mca_notify_die nd =
+ { .sos = sos, .monarch_cpu = &monarch_cpu };
oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */
console_loglevel = 15; /* make sure printks make it to console */
+ (void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+
printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
sos->proc_state_param, cpu, sos->monarch);
salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
@@ -1390,15 +1398,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
while (monarch_cpu == -1)
cpu_relax(); /* spin until monarch enters */
- if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
- if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
while (monarch_cpu != -1)
cpu_relax(); /* spin until monarch leaves */
- if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
printk("Slave on cpu %d returning to normal service.\n", cpu);
@@ -1409,7 +1417,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
}
monarch_cpu = cpu;
- if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -1426,10 +1434,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
* to default_monarch_init_process() above and just print all the
* tasks.
*/
- if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
- if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
@@ -1631,6 +1639,7 @@ ia64_mca_init(void)
printk(KERN_INFO "Increasing MCA rendezvous timeout from "
"%ld to %ld milliseconds\n", timeout, isrv.v0);
timeout = isrv.v0;
+ (void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
continue;
}
printk(KERN_ERR "Failed to register rendezvous interrupt "
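With this change the MCA/INIT notify_die calls pass a pointer to the new ia64_mca_notify_die structure through the notifier's err slot, giving hooks access to the SAL-to-OS state and the monarch CPU. A rough sketch of a consumer (the hook itself is hypothetical; it assumes the structure is visible via asm/mca.h and that die_args has the standard kdebug layout):

/* Hypothetical notify_die hook; shows how the new pointer is recovered. */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>
#include <asm/mca.h>

static int example_mca_hook(struct notifier_block *nb, unsigned long val,
			    void *data)
{
	struct die_args *args = data;
	struct ia64_mca_notify_die *nd =
		(struct ia64_mca_notify_die *)args->err;

	if (val == DIE_MCA_MONARCH_ENTER && nd && nd->sos)
		printk(KERN_INFO "MCA monarch %d, PSP=%lx\n",
		       *nd->monarch_cpu, nd->sos->proc_state_param);

	return NOTIFY_DONE;
}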
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 60a464bfd9e2..6dff024cd62b 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -827,7 +827,7 @@ ia64_state_restore:
ld8 r9=[temp2],16 // sal_gp
;;
ld8 r22=[temp1],16 // pal_min_state, virtual
- ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT
+ ld8 r13=[temp2],16 // prev_IA64_KR_CURRENT
;;
ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK
ld8 r20=[temp2],16 // prev_task
@@ -848,7 +848,7 @@ ia64_state_restore:
mov cr.iim=temp3
mov cr.iha=temp4
dep r22=0,r22,62,1 // pal_min_state, physical, uncached
- mov IA64_KR(CURRENT)=r21
+ mov IA64_KR(CURRENT)=r13
ld8 r8=[temp1] // os_status
ld8 r10=[temp2] // context
@@ -856,7 +856,7 @@ ia64_state_restore:
* avoid any dependencies on the algorithm in ia64_switch_to(), just
* purge any existing CURRENT_STACK mapping and insert the new one.
*
- * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
+ * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
* prev_IA64_KR_CURRENT, these values may have been changed by the C
* code. Do not use r8, r9, r10, r22, they contain values ready for
* the return to SAL.
@@ -873,7 +873,7 @@ ia64_state_restore:
;;
srlz.d
- extr.u r19=r21,61,3 // r21 = prev_IA64_KR_CURRENT
+ extr.u r19=r13,61,3 // r13 = prev_IA64_KR_CURRENT
shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK
movl r21=PAGE_KERNEL // page properties
;;
@@ -883,7 +883,7 @@ ia64_state_restore:
(p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:(
;;
mov cr.itir=r18
- mov cr.ifa=r21
+ mov cr.ifa=r13
mov r20=IA64_TR_CURRENT_STACK
;;
itr.d dtr[r20]=r21
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 7a2f0a798d12..3a30cfc9574f 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -947,7 +947,7 @@ void
percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
{
unsigned int i;
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
memcpy(pcpudst + __per_cpu_offset[i], src, size);
}
}
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index af7eb087dca7..d98ec49570b8 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -60,6 +60,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
struct siginfo si;
unsigned long mask;
+ /* mmap_sem is performance critical.... */
+ prefetchw(&mm->mmap_sem);
+
/*
* If we're in an interrupt or have no user context, we must not take the fault..
*/
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index d0abddd9ffe6..8255a9be4632 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -1831,7 +1831,7 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
enum xpc_retval ret = xpcUnknownReason;
- struct xpc_msg *msg;
+ struct xpc_msg *msg = NULL;
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);