Diffstat (limited to 'arch/ia64/sn/kernel')
-rw-r--r--  arch/ia64/sn/kernel/Makefile         2
-rw-r--r--  arch/ia64/sn/kernel/bte.c           17
-rw-r--r--  arch/ia64/sn/kernel/io_init.c       45
-rw-r--r--  arch/ia64/sn/kernel/irq.c           21
-rw-r--r--  arch/ia64/sn/kernel/klconflib.c     29
-rw-r--r--  arch/ia64/sn/kernel/setup.c         19
-rw-r--r--  arch/ia64/sn/kernel/sn2/Makefile     2
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c  196
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c    6
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c      19
10 files changed, 160 insertions, 196 deletions
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 4351c4ff9845..3e9b4eea7418 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -7,6 +7,8 @@
# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved.
#
+CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
+
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
huberror.o io_init.o iomv.o klconflib.o sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index dd73c0cb754b..1f11db470d90 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/config.h>
@@ -186,18 +186,13 @@ retry_bteop:
/* Initialize the notification to a known value. */
*bte->most_rcnt_na = BTE_WORD_BUSY;
- notif_phys_addr = TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na));
+ notif_phys_addr = (u64)bte->most_rcnt_na;
- if (is_shub2()) {
- src = SH2_TIO_PHYS_TO_DMA(src);
- dest = SH2_TIO_PHYS_TO_DMA(dest);
- notif_phys_addr = SH2_TIO_PHYS_TO_DMA(notif_phys_addr);
- }
/* Set the source and destination registers */
- BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
- BTE_SRC_STORE(bte, TO_PHYS(src));
- BTE_PRINTKV(("IBDA = 0x%lx)\n", (TO_PHYS(dest))));
- BTE_DEST_STORE(bte, TO_PHYS(dest));
+ BTE_PRINTKV(("IBSA = 0x%lx)\n", src));
+ BTE_SRC_STORE(bte, src);
+ BTE_PRINTKV(("IBDA = 0x%lx)\n", dest));
+ BTE_DEST_STORE(bte, dest);
/* Set the notification register */
BTE_PRINTKV(("IBNA = 0x%lx)\n", notif_phys_addr));
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 00700f7e6837..d7e4d79e16a8 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -10,6 +10,7 @@
#include <linux/nodemask.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
+#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/geo.h>
#include <asm/sn/io.h>
#include <asm/sn/pcibr_provider.h>
@@ -173,8 +174,8 @@ sn_pcidev_info_get(struct pci_dev *dev)
*/
static u8 war_implemented = 0;
-static void sn_device_fixup_war(u64 nasid, u64 widget, int device,
- struct sn_flush_device_common *common)
+static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
+ struct sn_flush_device_common *common)
{
struct sn_flush_device_war *war_list;
struct sn_flush_device_war *dev_entry;
@@ -198,15 +199,16 @@ static void sn_device_fixup_war(u64 nasid, u64 widget, int device,
dev_entry = war_list + device;
memcpy(common,dev_entry, sizeof(*common));
-
kfree(war_list);
+
+ return isrv.status;
}
/*
 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
* each node in the system.
*/
-static void sn_fixup_ionodes(void)
+static void __init sn_fixup_ionodes(void)
{
struct sn_flush_device_kernel *sn_flush_device_kernel;
struct sn_flush_device_kernel *dev_entry;
@@ -279,23 +281,21 @@ static void sn_fixup_ionodes(void)
memset(dev_entry->common, 0x0, sizeof(struct
sn_flush_device_common));
- status = sal_get_device_dmaflush_list(nasid,
- widget,
- device,
+ if (sn_prom_feature_available(
+ PRF_DEVICE_FLUSH_LIST))
+ status = sal_get_device_dmaflush_list(
+ nasid,
+ widget,
+ device,
(u64)(dev_entry->common));
- if (status) {
- if (sn_sal_rev() < 0x0450) {
- /* shortlived WAR for older
- * PROM images
- */
- sn_device_fixup_war(nasid,
- widget,
- device,
+ else
+ status = sn_device_fixup_war(nasid,
+ widget,
+ device,
dev_entry->common);
- }
- else
- BUG();
- }
+ if (status != SALRET_OK)
+ panic("SAL call failed: %s\n",
+ ia64_sal_strerror(status));
spin_lock_init(&dev_entry->sfdl_flush_lock);
}
@@ -467,6 +467,13 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
pcidev_info->pdi_sn_irq_info = NULL;
kfree(sn_irq_info);
}
+
+ /*
+ * MSI currently not supported on altix. Remove this when
+ * the MSI abstraction patches are integrated into the kernel
+ * (sometime after 2.6.16 releases)
+ */
+ dev->no_msi = 1;
}
/*
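Summarizing the io_init.c hunk above: sn_fixup_ionodes() now keys off a PROM feature bit rather than a PROM revision number, asking SAL for the per-device DMA flush list when PRF_DEVICE_FLUSH_LIST is advertised, falling back to the in-kernel workaround table otherwise, and panicking if neither path returns SALRET_OK. The following is only a minimal standalone sketch of that control flow; the helper names (prom_feature_available, sal_get_flush_list, device_fixup_war) and the status values are stand-ins, not the real SN2 interfaces.

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for the PROM feature bit and SAL status codes. */
    enum { PRF_DEVICE_FLUSH_LIST = 1 };
    enum { SALRET_OK = 0 };

    /* Hypothetical helpers modelling sn_prom_feature_available(),
     * sal_get_device_dmaflush_list() and sn_device_fixup_war(). */
    static int prom_feature_available(int feature) { return feature == PRF_DEVICE_FLUSH_LIST; }
    static long sal_get_flush_list(int nasid, int widget, int device, void *buf) { return SALRET_OK; }
    static long device_fixup_war(int nasid, int widget, int device, void *buf) { return SALRET_OK; }

    static void fixup_one_device(int nasid, int widget, int device, void *common)
    {
        long status;

        /* Prefer the PROM-provided flush list; otherwise apply the
         * older in-kernel workaround table. */
        if (prom_feature_available(PRF_DEVICE_FLUSH_LIST))
            status = sal_get_flush_list(nasid, widget, device, common);
        else
            status = device_fixup_war(nasid, widget, device, common);

        /* Either path must succeed; the patch panics on failure. */
        if (status != SALRET_OK) {
            fprintf(stderr, "SAL call failed: %ld\n", status);
            exit(1);
        }
    }

    int main(void)
    {
        long dummy_common = 0;
        fixup_one_device(/*nasid*/ 0, /*widget*/ 1, /*device*/ 2, &dummy_common);
        return 0;
    }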
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index ec37084bdc17..74d87d903d5d 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -5,11 +5,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/irq.h>
#include <linux/spinlock.h>
+#include <linux/init.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
@@ -76,17 +77,15 @@ static void sn_enable_irq(unsigned int irq)
static void sn_ack_irq(unsigned int irq)
{
- u64 event_occurred, mask = 0;
+ u64 event_occurred, mask;
irq = irq & 0xff;
- event_occurred =
- HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
+ event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
mask = event_occurred & SH_ALL_INT_MASK;
- HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
- mask);
+ HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
- move_irq(irq);
+ move_native_irq(irq);
}
static void sn_end_irq(unsigned int irq)
@@ -219,9 +218,8 @@ static void register_intr_pda(struct sn_irq_info *sn_irq_info)
pdacpu(cpu)->sn_last_irq = irq;
}
- if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
+ if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
pdacpu(cpu)->sn_first_irq = irq;
- }
}
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
@@ -289,7 +287,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
spin_unlock(&sn_irq_info_lock);
- (void)register_intr_pda(sn_irq_info);
+ register_intr_pda(sn_irq_info);
}
void sn_irq_unfixup(struct pci_dev *pci_dev)
@@ -419,7 +417,7 @@ void sn_lb_int_war_check(void)
rcu_read_unlock();
}
-void sn_irq_lh_init(void)
+void __init sn_irq_lh_init(void)
{
int i;
@@ -434,5 +432,4 @@ void sn_irq_lh_init(void)
INIT_LIST_HEAD(sn_irq_lh[i]);
}
-
}
diff --git a/arch/ia64/sn/kernel/klconflib.c b/arch/ia64/sn/kernel/klconflib.c
index 0f11a3299cd2..87682b48ef83 100644
--- a/arch/ia64/sn/kernel/klconflib.c
+++ b/arch/ia64/sn/kernel/klconflib.c
@@ -78,31 +78,30 @@ format_module_id(char *buffer, moduleid_t m, int fmt)
position = MODULE_GET_BPOS(m);
if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) {
- /* Brief module number format, eg. 002c15 */
+ /* Brief module number format, eg. 002c15 */
- /* Decompress the rack number */
- *buffer++ = '0' + RACK_GET_CLASS(rack);
- *buffer++ = '0' + RACK_GET_GROUP(rack);
- *buffer++ = '0' + RACK_GET_NUM(rack);
+ /* Decompress the rack number */
+ *buffer++ = '0' + RACK_GET_CLASS(rack);
+ *buffer++ = '0' + RACK_GET_GROUP(rack);
+ *buffer++ = '0' + RACK_GET_NUM(rack);
- /* Add the brick type */
- *buffer++ = brickchar;
+ /* Add the brick type */
+ *buffer++ = brickchar;
}
else if (fmt == MODULE_FORMAT_LONG) {
- /* Fuller hwgraph format, eg. rack/002/bay/15 */
+ /* Fuller hwgraph format, eg. rack/002/bay/15 */
- strcpy(buffer, "rack" "/"); buffer += strlen(buffer);
+ strcpy(buffer, "rack" "/"); buffer += strlen(buffer);
- *buffer++ = '0' + RACK_GET_CLASS(rack);
- *buffer++ = '0' + RACK_GET_GROUP(rack);
- *buffer++ = '0' + RACK_GET_NUM(rack);
+ *buffer++ = '0' + RACK_GET_CLASS(rack);
+ *buffer++ = '0' + RACK_GET_GROUP(rack);
+ *buffer++ = '0' + RACK_GET_NUM(rack);
- strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer);
+ strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer);
}
/* Add the bay position, using at least two digits */
if (position < 10)
- *buffer++ = '0';
+ *buffer++ = '0';
sprintf(buffer, "%d", position);
-
}
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index e510dce9971f..ee36bff93c30 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -209,7 +209,7 @@ void __init early_sn_setup(void)
}
extern int platform_intr_list[];
-static int __initdata shub_1_1_found = 0;
+static int __initdata shub_1_1_found;
/*
* sn_check_for_wars
@@ -578,13 +578,17 @@ void __init sn_cpu_init(void)
sn_prom_type = 2;
else
sn_prom_type = 1;
- printk("Running on medusa with %s PROM\n", (sn_prom_type == 1) ? "real" : "fake");
+ printk(KERN_INFO "Running on medusa with %s PROM\n",
+ (sn_prom_type == 1) ? "real" : "fake");
}
memset(pda, 0, sizeof(pda));
- if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
- &sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
- &sn_coherency_id, &sn_region_size))
+ if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
+ &sn_hub_info->nasid_bitmask,
+ &sn_hub_info->nasid_shift,
+ &sn_system_size, &sn_sharing_domain_size,
+ &sn_partition_id, &sn_coherency_id,
+ &sn_region_size))
BUG();
sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
@@ -716,7 +720,8 @@ void __init build_cnode_tables(void)
for_each_online_node(node) {
kl_config_hdr_t *klgraph_header;
nasid = cnodeid_to_nasid(node);
- if ((klgraph_header = ia64_sn_get_klconfig_addr(nasid)) == NULL)
+ klgraph_header = ia64_sn_get_klconfig_addr(nasid);
+ if (klgraph_header == NULL)
BUG();
brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
while (brd) {
@@ -734,7 +739,7 @@ nasid_slice_to_cpuid(int nasid, int slice)
{
long cpu;
- for (cpu=0; cpu < NR_CPUS; cpu++)
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
if (cpuid_to_nasid(cpu) == nasid &&
cpuid_to_slice(cpu) == slice)
return cpu;
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
index 170bde4549da..99e177693234 100644
--- a/arch/ia64/sn/kernel/sn2/Makefile
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -9,5 +9,7 @@
# sn2 specific kernel files
#
+CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
+
obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 471bbaa65d1b..f153a4c35c70 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/init.h>
@@ -46,104 +46,28 @@ DECLARE_PER_CPU(struct ptc_stats, ptcstats);
static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
-void sn2_ptc_deadlock_recovery(short *, short, int, volatile unsigned long *, unsigned long data0,
- volatile unsigned long *, unsigned long data1);
+void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned long *, unsigned long,
+ volatile unsigned long *, unsigned long);
-#ifdef DEBUG_PTC
/*
- * ptctest:
- *
- * xyz - 3 digit hex number:
- * x - Force PTC purges to use shub:
- * 0 - no force
- * 1 - force
- * y - interupt enable
- * 0 - disable interrupts
- * 1 - leave interuupts enabled
- * z - type of lock:
- * 0 - global lock
- * 1 - node local lock
- * 2 - no lock
- *
- * Note: on shub1, only ptctest == 0 is supported. Don't try other values!
+ * Note: some of the following is captured here to make debugging easier
+ * (the macros make more sense if you see the debug patch - not posted)
*/
-
-static unsigned int sn2_ptctest = 0;
-
-static int __init ptc_test(char *str)
-{
- get_option(&str, &sn2_ptctest);
- return 1;
-}
-__setup("ptctest=", ptc_test);
-
-static inline int ptc_lock(unsigned long *flagp)
-{
- unsigned long opt = sn2_ptctest & 255;
-
- switch (opt) {
- case 0x00:
- spin_lock_irqsave(&sn2_global_ptc_lock, *flagp);
- break;
- case 0x01:
- spin_lock_irqsave(&sn_nodepda->ptc_lock, *flagp);
- break;
- case 0x02:
- local_irq_save(*flagp);
- break;
- case 0x10:
- spin_lock(&sn2_global_ptc_lock);
- break;
- case 0x11:
- spin_lock(&sn_nodepda->ptc_lock);
- break;
- case 0x12:
- break;
- default:
- BUG();
- }
- return opt;
-}
-
-static inline void ptc_unlock(unsigned long flags, int opt)
-{
- switch (opt) {
- case 0x00:
- spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
- break;
- case 0x01:
- spin_unlock_irqrestore(&sn_nodepda->ptc_lock, flags);
- break;
- case 0x02:
- local_irq_restore(flags);
- break;
- case 0x10:
- spin_unlock(&sn2_global_ptc_lock);
- break;
- case 0x11:
- spin_unlock(&sn_nodepda->ptc_lock);
- break;
- case 0x12:
- break;
- default:
- BUG();
- }
-}
-#else
-
#define sn2_ptctest 0
+#define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0)
+#define max_active_pio(sh1) ((sh1) ? 32 : 7)
+#define reset_max_active_on_deadlock() 1
+#define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
-static inline int ptc_lock(unsigned long *flagp)
+static inline void ptc_lock(int sh1, unsigned long *flagp)
{
- spin_lock_irqsave(&sn2_global_ptc_lock, *flagp);
- return 0;
+ spin_lock_irqsave(PTC_LOCK(sh1), *flagp);
}
-static inline void ptc_unlock(unsigned long flags, int opt)
+static inline void ptc_unlock(int sh1, unsigned long flags)
{
- spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+ spin_unlock_irqrestore(PTC_LOCK(sh1), flags);
}
-#endif
struct ptc_stats {
unsigned long ptc_l;
@@ -151,27 +75,30 @@ struct ptc_stats {
unsigned long shub_ptc_flushes;
unsigned long nodes_flushed;
unsigned long deadlocks;
+ unsigned long deadlocks2;
unsigned long lock_itc_clocks;
unsigned long shub_itc_clocks;
unsigned long shub_itc_clocks_max;
+ unsigned long shub_ptc_flushes_not_my_mm;
};
static inline unsigned long wait_piowc(void)
{
- volatile unsigned long *piows, zeroval;
- unsigned long ws;
+ volatile unsigned long *piows;
+ unsigned long zeroval, ws;
piows = pda->pio_write_status_addr;
zeroval = pda->pio_write_status_val;
do {
cpu_relax();
} while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
- return ws;
+ return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
}
void sn_tlb_migrate_finish(struct mm_struct *mm)
{
- if (mm == current->mm)
+ /* flush_tlb_mm is inefficient if more than 1 users of mm */
+ if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
flush_tlb_mm(mm);
}
@@ -201,12 +128,14 @@ void
sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long nbits)
{
- int i, opt, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
- int mymm = (mm == current->active_mm && current->mm);
+ int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
+ int mymm = (mm == current->active_mm && mm == current->mm);
+ int use_cpu_ptcga;
volatile unsigned long *ptc0, *ptc1;
- unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value;
+ unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
short nasids[MAX_NUMNODES], nix;
nodemask_t nodes_flushed;
+ int active, max_active, deadlock;
nodes_clear(nodes_flushed);
i = 0;
@@ -267,41 +196,56 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
mynasid = get_nasid();
+ use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
+ max_active = max_active_pio(shub1);
itc = ia64_get_itc();
- opt = ptc_lock(&flags);
+ ptc_lock(shub1, &flags);
itc2 = ia64_get_itc();
+
__get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
__get_cpu_var(ptcstats).shub_ptc_flushes++;
__get_cpu_var(ptcstats).nodes_flushed += nix;
+ if (!mymm)
+ __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;
+ if (use_cpu_ptcga && !mymm) {
+ old_rr = ia64_get_rr(start);
+ ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
+ ia64_srlz_d();
+ }
+
+ wait_piowc();
do {
if (shub1)
data1 = start | (1UL << SH1_PTC_1_START_SHFT);
else
data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
- for (i = 0; i < nix; i++) {
+ deadlock = 0;
+ active = 0;
+ for (ibegin = 0, i = 0; i < nix; i++) {
nasid = nasids[i];
- if ((!(sn2_ptctest & 3)) && unlikely(nasid == mynasid && mymm)) {
+ if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
ia64_ptcga(start, nbits << 2);
ia64_srlz_i();
} else {
ptc0 = CHANGE_NASID(nasid, ptc0);
if (ptc1)
ptc1 = CHANGE_NASID(nasid, ptc1);
- pio_atomic_phys_write_mmrs(ptc0, data0, ptc1,
- data1);
- flushed = 1;
+ pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
+ active++;
+ }
+ if (active >= max_active || i == (nix - 1)) {
+ if ((deadlock = wait_piowc())) {
+ sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
+ if (reset_max_active_on_deadlock())
+ max_active = 1;
+ }
+ active = 0;
+ ibegin = i + 1;
}
}
- if (flushed
- && (wait_piowc() &
- (SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK))) {
- sn2_ptc_deadlock_recovery(nasids, nix, mynasid, ptc0, data0, ptc1, data1);
- }
-
start += (1UL << nbits);
-
} while (start < end);
itc2 = ia64_get_itc() - itc2;
@@ -309,7 +253,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
__get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
- ptc_unlock(flags, opt);
+ if (old_rr) {
+ ia64_set_rr(start, old_rr);
+ ia64_srlz_d();
+ }
+
+ ptc_unlock(shub1, flags);
preempt_enable();
}
@@ -321,27 +270,30 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
* TLB flush transaction. The recovery sequence is somewhat tricky & is
* coded in assembly language.
*/
-void sn2_ptc_deadlock_recovery(short *nasids, short nix, int mynasid, volatile unsigned long *ptc0, unsigned long data0,
+void sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid, volatile unsigned long *ptc0, unsigned long data0,
volatile unsigned long *ptc1, unsigned long data1)
{
- extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
+ extern unsigned long sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
short nasid, i;
- unsigned long *piows, zeroval;
+ unsigned long *piows, zeroval, n;
__get_cpu_var(ptcstats).deadlocks++;
piows = (unsigned long *) pda->pio_write_status_addr;
zeroval = pda->pio_write_status_val;
- for (i=0; i < nix; i++) {
+
+ for (i=ib; i <= ie; i++) {
nasid = nasids[i];
- if (!(sn2_ptctest & 3) && nasid == mynasid)
+ if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
continue;
ptc0 = CHANGE_NASID(nasid, ptc0);
if (ptc1)
ptc1 = CHANGE_NASID(nasid, ptc1);
- sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
+
+ n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
+ __get_cpu_var(ptcstats).deadlocks2 += n;
}
}
@@ -452,20 +404,22 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
cpu = *(loff_t *) data;
if (!cpu) {
- seq_printf(file, "# ptc_l change_rid shub_ptc_flushes shub_nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max\n");
+ seq_printf(file,
+ "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n");
seq_printf(file, "# ptctest %d\n", sn2_ptctest);
}
if (cpu < NR_CPUS && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu);
- seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
+ seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
stat->deadlocks,
1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec);
+ 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+ stat->shub_ptc_flushes_not_my_mm,
+ stat->deadlocks2);
}
-
return 0;
}
@@ -476,7 +430,7 @@ static struct seq_operations sn2_ptc_seq_ops = {
.show = sn2_ptc_seq_show
};
-int sn2_ptc_proc_open(struct inode *inode, struct file *file)
+static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &sn2_ptc_seq_ops);
}
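The sn2_smp.c rework above batches the PTC MMR writes: at most max_active PIO writes are issued before the PIO write-status register is drained, and if a deadlock is reported the already-issued range [ibegin, i] is pushed through the recovery path and the batch size drops to one. Below is a rough standalone sketch of just that loop structure; issue_ptc_write, wait_pio_writes, recover_range and flush_nodes are hypothetical stand-ins for the MMR write, wait_piowc() and sn2_ptc_deadlock_recovery() in the patch, and the local ia64_ptcga short-circuit is omitted.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for issuing one PTC PIO write, waiting on
     * the PIO write-status register, and recovering a range of nasids. */
    static void issue_ptc_write(short nasid) { (void)nasid; }
    static bool wait_pio_writes(void)        { return false; /* true => deadlock */ }
    static void recover_range(const short *nasids, int ib, int ie)
    {
        printf("recovering nasids[%d..%d]\n", ib, ie);
    }

    static void flush_nodes(const short *nasids, int nix)
    {
        int max_active = 32;    /* max_active_pio() for shub2 in the patch */
        int active = 0, ibegin = 0;

        for (int i = 0; i < nix; i++) {
            issue_ptc_write(nasids[i]);
            active++;

            /* Drain after every max_active writes, or at the end. */
            if (active >= max_active || i == nix - 1) {
                if (wait_pio_writes()) {
                    /* Retry the batch that may have been dropped and
                     * throttle further writes to one at a time. */
                    recover_range(nasids, ibegin, i);
                    max_active = 1;
                }
                active = 0;
                ibegin = i + 1;
            }
        }
    }

    int main(void)
    {
        short nasids[] = { 1, 3, 5, 7 };
        flush_nodes(nasids, 4);
        return 0;
    }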
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 8d950c778bb6..36e5437a0fb6 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -447,7 +447,7 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
nbytes = nentries * ch->msg_size;
ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
- (GFP_KERNEL | GFP_DMA),
+ GFP_KERNEL,
&ch->local_msgqueue_base);
if (ch->local_msgqueue == NULL) {
continue;
@@ -455,7 +455,7 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
memset(ch->local_msgqueue, 0, nbytes);
nbytes = nentries * sizeof(struct xpc_notify);
- ch->notify_queue = kmalloc(nbytes, (GFP_KERNEL | GFP_DMA));
+ ch->notify_queue = kmalloc(nbytes, GFP_KERNEL);
if (ch->notify_queue == NULL) {
kfree(ch->local_msgqueue_base);
ch->local_msgqueue = NULL;
@@ -502,7 +502,7 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
nbytes = nentries * ch->msg_size;
ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
- (GFP_KERNEL | GFP_DMA),
+ GFP_KERNEL,
&ch->remote_msgqueue_base);
if (ch->remote_msgqueue == NULL) {
continue;
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index c75f8aeefc2b..9cd460dfe27e 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -575,18 +575,21 @@ xpc_activate_partition(struct xpc_partition *part)
spin_lock_irqsave(&part->act_lock, irq_flags);
- pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
-
DBUG_ON(part->act_state != XPC_P_INACTIVE);
- if (pid > 0) {
- part->act_state = XPC_P_ACTIVATION_REQ;
- XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
- } else {
- XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
- }
+ part->act_state = XPC_P_ACTIVATION_REQ;
+ XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+ pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
+
+ if (unlikely(pid <= 0)) {
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+ part->act_state = XPC_P_INACTIVE;
+ XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ }
}
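The xpc_main.c change above reorders xpc_activate_partition() so the partition state is committed while act_lock is held, kernel_thread() is called only after the lock is dropped, and the state is rolled back to XPC_P_INACTIVE if the thread cannot be created. A small pthread-based sketch of that set-state/spawn/rollback pattern follows, as an assumption-laden illustration only; the struct and function names are invented, not the XPC API.

    #include <pthread.h>
    #include <stdio.h>

    enum part_state { P_INACTIVE, P_ACTIVATION_REQ };

    struct partition {
        pthread_mutex_t act_lock;
        enum part_state act_state;
    };

    static void *activating(void *arg) { (void)arg; return NULL; }

    /* Mirrors the patched flow: commit the state change under the lock,
     * spawn the worker with the lock dropped, undo the state on failure. */
    static void activate_partition(struct partition *part)
    {
        pthread_t tid;

        pthread_mutex_lock(&part->act_lock);
        part->act_state = P_ACTIVATION_REQ;
        pthread_mutex_unlock(&part->act_lock);

        /* Thread creation may sleep or fail, so it happens outside the lock. */
        if (pthread_create(&tid, NULL, activating, part) != 0) {
            pthread_mutex_lock(&part->act_lock);
            part->act_state = P_INACTIVE;
            pthread_mutex_unlock(&part->act_lock);
            fprintf(stderr, "failed to start activation thread\n");
        } else {
            pthread_join(tid, NULL);
        }
    }

    int main(void)
    {
        struct partition part = { .act_state = P_INACTIVE };

        pthread_mutex_init(&part.act_lock, NULL);
        activate_partition(&part);
        pthread_mutex_destroy(&part.act_lock);
        return 0;
    }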