Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--   arch/powerpc/platforms/cell/axon_msi.c    | 36
-rw-r--r--   arch/powerpc/platforms/cell/iommu.c       | 37
-rw-r--r--   arch/powerpc/platforms/cell/ras.c         |  7
-rw-r--r--   arch/powerpc/platforms/cell/smp.c         |  9
-rw-r--r--   arch/powerpc/platforms/cell/spufs/file.c  |  3
5 files changed, 63 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 896548ba1ca1..442cf36aa172 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -95,6 +95,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
struct axon_msic *msic = get_irq_data(irq);
u32 write_offset, msi;
int idx;
+ int retry = 0;
write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
pr_debug("axon_msi: original write_offset 0x%x\n", write_offset);
@@ -102,7 +103,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
/* write_offset doesn't wrap properly, so we have to mask it */
write_offset &= MSIC_FIFO_SIZE_MASK;
- while (msic->read_offset != write_offset) {
+ while (msic->read_offset != write_offset && retry < 100) {
idx = msic->read_offset / sizeof(__le32);
msi = le32_to_cpu(msic->fifo_virt[idx]);
msi &= 0xFFFF;
@@ -110,13 +111,37 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
pr_debug("axon_msi: woff %x roff %x msi %x\n",
write_offset, msic->read_offset, msi);
+ if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
+ generic_handle_irq(msi);
+ msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
+ } else {
+ /*
+ * Reading the MSIC_WRITE_OFFSET_REG does not
+ * reliably flush the outstanding DMA to the
+ * FIFO buffer. Here we were reading stale
+ * data, so we need to retry.
+ */
+ udelay(1);
+ retry++;
+ pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+ continue;
+ }
+
+ if (retry) {
+ pr_debug("axon_msi: late irq 0x%x, retry %d\n",
+ msi, retry);
+ retry = 0;
+ }
+
msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
msic->read_offset &= MSIC_FIFO_SIZE_MASK;
+ }
- if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host)
- generic_handle_irq(msi);
- else
- pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+ if (retry) {
+ printk(KERN_WARNING "axon_msi: irq timed out\n");
+
+ msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
+ msic->read_offset &= MSIC_FIFO_SIZE_MASK;
}
desc->chip->eoi(irq);
@@ -364,6 +389,7 @@ static int axon_msi_probe(struct of_device *device,
dn->full_name);
goto out_free_fifo;
}
+ memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
NR_IRQS, &msic_host_ops, 0);
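
The axon_msi change above works around MSI writes that can land in the FIFO after MSIC_WRITE_OFFSET_REG already points past them: rather than discarding an entry that does not decode to a valid irq, the handler re-reads the same slot up to 100 times with a 1us delay, and only skips it once the retries are exhausted. Poisoning the FIFO with 0xff at probe time, and re-poisoning each entry once it has been handled, is what makes a stale, not-yet-written slot detectable. A minimal sketch of the same bounded-retry drain pattern, with a hypothetical handle_entry() consumer and a plain slot index in place of the MSIC byte offsets:

	/* needs <linux/delay.h> for udelay() */
	struct fifo {
		u32 *slots;
		unsigned int read, write, mask;
	};

	#define FIFO_POISON		0xffffffff
	#define DRAIN_MAX_RETRIES	100

	extern void handle_entry(u32 entry);	/* hypothetical consumer */

	static void drain_fifo(struct fifo *f)
	{
		int retry = 0;

		while (f->read != f->write && retry < DRAIN_MAX_RETRIES) {
			u32 entry = f->slots[f->read];

			if (entry == FIFO_POISON) {
				/* producer's DMA not visible yet: re-read this slot */
				udelay(1);
				retry++;
				continue;
			}
			retry = 0;

			handle_entry(entry);
			f->slots[f->read] = FIFO_POISON; /* re-arm the staleness check */
			f->read = (f->read + 1) & f->mask;
		}

		if (retry)	/* retries exhausted: skip the slot to avoid a livelock */
			f->read = (f->read + 1) & f->mask;
	}
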
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index ef92e7146215..3168272ab0d7 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -593,31 +593,30 @@ static void dma_fixed_free_coherent(struct device *dev, size_t size,
dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle);
}
-static dma_addr_t dma_fixed_map_single(struct device *dev, void *ptr,
- size_t size,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
+static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
- return dma_direct_ops.map_single(dev, ptr, size, direction,
- attrs);
+ return dma_direct_ops.map_page(dev, page, offset, size,
+ direction, attrs);
else
- return iommu_map_single(dev, cell_get_iommu_table(dev), ptr,
- size, device_to_mask(dev), direction,
- attrs);
+ return iommu_map_page(dev, cell_get_iommu_table(dev), page,
+ offset, size, device_to_mask(dev),
+ direction, attrs);
}
-static void dma_fixed_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
+static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
- dma_direct_ops.unmap_single(dev, dma_addr, size, direction,
- attrs);
+ dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
+ attrs);
else
- iommu_unmap_single(cell_get_iommu_table(dev), dma_addr, size,
- direction, attrs);
+ iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
+ direction, attrs);
}
static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
@@ -652,12 +651,12 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
struct dma_mapping_ops dma_iommu_fixed_ops = {
.alloc_coherent = dma_fixed_alloc_coherent,
.free_coherent = dma_fixed_free_coherent,
- .map_single = dma_fixed_map_single,
- .unmap_single = dma_fixed_unmap_single,
.map_sg = dma_fixed_map_sg,
.unmap_sg = dma_fixed_unmap_sg,
.dma_supported = dma_fixed_dma_supported,
.set_dma_mask = dma_set_mask_and_switch,
+ .map_page = dma_fixed_map_page,
+ .unmap_page = dma_fixed_unmap_page,
};
static void cell_dma_dev_setup_fixed(struct device *dev);
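
The iommu.c hunks convert the fixed-mapping DMA ops from the old map_single/unmap_single interface to map_page/unmap_page, matching the generic struct dma_mapping_ops of this kernel generation. The switch loses nothing: a kernel-linear buffer decomposes into a page plus an intra-page offset, so any map_single-style call reduces to map_page. A hypothetical wrapper (not part of the patch) makes the equivalence explicit:

	static dma_addr_t map_single_via_page(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
	{
		return dma_fixed_map_page(dev, virt_to_page(ptr),
					  offset_in_page(ptr), size, dir,
					  attrs);
	}
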
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 665af1c4195b..7b4cefa2199b 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -13,15 +13,16 @@
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
+#include <asm/kexec.h>
#include <asm/reg.h>
#include <asm/io.h>
#include <asm/prom.h>
-#include <asm/kexec.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/cell-regs.h>
-#include <asm/kdump.h>
#include "ras.h"
@@ -112,7 +113,7 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
int ret = -ENOMEM;
unsigned long addr;
- if (__kdump_flag)
+ if (is_kdump_kernel())
rtas_call(ptcal_stop_tok, 1, 1, NULL, nid);
area = kmalloc(sizeof(*area), GFP_KERNEL);
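
The ras.c hunk replaces the powerpc-private __kdump_flag with the generic is_kdump_kernel() helper, which is why <linux/crash_dump.h> joins the include list and <asm/kdump.h> drops out. The generic test is roughly the following (paraphrased from the <linux/crash_dump.h> of this era, not a new definition): a kdump kernel is handed the physical address of the crashed kernel's ELF core header, so a non-sentinel elfcorehdr_addr identifies a kdump boot.

	/* approximately the <linux/crash_dump.h> helper */
	static inline int is_kdump_kernel(void)
	{
		return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
	}
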
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index c0d86e1f56ea..9046803c8276 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -129,10 +129,15 @@ static int __init smp_iic_probe(void)
return cpus_weight(cpu_possible_map);
}
-static void __devinit smp_iic_setup_cpu(int cpu)
+static void __devinit smp_cell_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
iic_setup_cpu();
+
+ /*
+ * change default DABRX to allow user watchpoints
+ */
+ mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
}
static DEFINE_SPINLOCK(timebase_lock);
@@ -192,7 +197,7 @@ static struct smp_ops_t bpa_iic_smp_ops = {
.message_pass = smp_iic_message_pass,
.probe = smp_iic_probe,
.kick_cpu = smp_cell_kick_cpu,
- .setup_cpu = smp_iic_setup_cpu,
+ .setup_cpu = smp_cell_setup_cpu,
.cpu_bootable = smp_cell_cpu_bootable,
};
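
Besides renaming smp_iic_setup_cpu() to smp_cell_setup_cpu(), now that the function does more than IIC setup, the smp.c change programs DABRX on each CPU as it comes online. DABRX selects the MSR privilege states in which the DABR data-address breakpoint is allowed to match; if firmware leaves DABRX_USER clear, user-space hardware watchpoints silently never fire. The bits involved, as defined in <asm/reg.h>:

	#define DABRX_USER	(1UL << 0)	/* match in user (problem) state */
	#define DABRX_KERNEL	(1UL << 1)	/* match in privileged state */
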
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index b73c369cc6f1..1b26071a86ca 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -390,6 +390,9 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
if (offset >= ps_size)
return VM_FAULT_SIGBUS;
+ if (fatal_signal_pending(current))
+ return VM_FAULT_SIGBUS;
+
/*
* Because we release the mmap_sem, the context may be destroyed while
* we're in spu_wait. Grab an extra reference so it isn't destroyed
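
The spufs change applies a general rule for .fault handlers that can block for a long time (here, in spu_wait() after mmap_sem is dropped): bail out with VM_FAULT_SIGBUS if the task has already received a fatal signal, so a killed process cannot stall in the page-fault path. A sketch of the pattern, with a hypothetical wait_for_slow_resource() standing in for the spufs specifics:

	extern int wait_for_slow_resource(void *res);	/* hypothetical */

	static int sketch_ps_fault(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
	{
		if (fatal_signal_pending(current))
			return VM_FAULT_SIGBUS;	/* task is dying: do not block */

		if (wait_for_slow_resource(vma->vm_private_data))
			return VM_FAULT_SIGBUS;

		return VM_FAULT_NOPAGE;
	}
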