summaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorTudor Laurentiu <b10716@freescale.com>2014-08-21 12:33:53 +0300
committerScott Wood <scottwood@freescale.com>2014-09-04 18:51:45 -0500
commitc822e73731fce3b49a4887140878d084d8a44c08 (patch)
tree5ad4eccb8fb9e033eb3b379d7060bb6f6e86b2ba /arch/powerpc
parentde99f53d3af0e1bb9bc6c9e3107d72f3af6e05e9 (diff)
downloadblackbird-op-linux-c822e73731fce3b49a4887140878d084d8a44c08.tar.gz
blackbird-op-linux-c822e73731fce3b49a4887140878d084d8a44c08.zip
powerpc/fsl_msi: spread msi ints across different MSIRs
Allocate msis such that each time a new interrupt is requested, the SRS (MSIR register select) to be used is allocated in a round-robin fashion. The end result is that the msi interrupts will be spread across distinct MSIRs with the main benefit that now users can set affinity to each msi int through the mpic irq backing up the MSIR register. This is achieved with the help of a newly introduced msi bitmap api that allows specifying the starting point when searching for a free msi interrupt. Signed-off-by: Laurentiu Tudor <Laurentiu.Tudor@freescale.com> Cc: Scott Wood <scottwood@freescale.com> Cc: Mihai Caraman <mihai.caraman@freescale.com> Signed-off-by: Scott Wood <scottwood@freescale.com>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/include/asm/msi_bitmap.h2
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c31
-rw-r--r--arch/powerpc/sysdev/fsl_msi.h5
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c25
4 files changed, 52 insertions, 11 deletions
diff --git a/arch/powerpc/include/asm/msi_bitmap.h b/arch/powerpc/include/asm/msi_bitmap.h
index 97ac3f46ae0d..96c2f9500574 100644
--- a/arch/powerpc/include/asm/msi_bitmap.h
+++ b/arch/powerpc/include/asm/msi_bitmap.h
@@ -25,6 +25,8 @@ int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num);
void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
unsigned int num);
void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq);
+int msi_bitmap_alloc_hwirqs_from_offset(struct msi_bitmap *bmp, int offset,
+ int num);
int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp);
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index e2ee226464f8..37254eff7324 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -213,6 +213,8 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
* available interrupt.
*/
list_for_each_entry(msi_data, &msi_head, list) {
+ int off;
+
/*
* If the PCI node has an fsl,msi property, then we
* restrict our search to the corresponding MSI node.
@@ -224,7 +226,28 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
if (phandle && (phandle != msi_data->phandle))
continue;
- hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
+ /*
+ * Allocate the msi message so that it fits on distinct
+ * MSIR registers. Obviously, since MSIR registers are
+ * limited they will overlap at one point.
+ *
+ * Due to the format of the newly introduced MSIIR1 in
+ * mpic 4.3, consecutive msi message values map to
+ * distinct MSIRs, thus distinct msi irq cascades, so
+ * nothing special needs to be done in this case.
+	 * On older mpic versions we chose distinct SRS
+ * values by aligning the msi message value to the
+ * SRS field shift.
+ */
+ if (msi_data->feature & FSL_PIC_FTR_MPIC_4_3) {
+ off = 0;
+ } else {
+ off = atomic_inc_return(&msi_data->msi_alloc_cnt) %
+ msi_data->msir_num;
+ off <<= msi_data->srs_shift;
+ }
+ hwirq = msi_bitmap_alloc_hwirqs_from_offset(
+ &msi_data->bitmap, off, 1);
if (hwirq >= 0)
break;
}
@@ -464,12 +487,17 @@ static int fsl_of_msi_probe(struct platform_device *dev)
goto error_out;
}
+ atomic_set(&msi->msi_alloc_cnt, -1);
+
p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
msi->srs_shift = MSIIR1_SRS_SHIFT;
msi->ibs_shift = MSIIR1_IBS_SHIFT;
+ msi->msir_num = NR_MSI_REG_MSIIR1;
+ msi->feature |= FSL_PIC_FTR_MPIC_4_3;
+
if (p)
dev_warn(&dev->dev, "%s: does not support msi-available-ranges property\n",
__func__);
@@ -487,6 +515,7 @@ static int fsl_of_msi_probe(struct platform_device *dev)
msi->srs_shift = MSIIR_SRS_SHIFT;
msi->ibs_shift = MSIIR_IBS_SHIFT;
+ msi->msir_num = NR_MSI_REG_MSIIR;
if (p && len % (2 * sizeof(u32)) != 0) {
dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index 420cfcbdac01..50ec4b04c732 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <asm/msi_bitmap.h>
+#include <asm/atomic.h>
#define NR_MSI_REG_MSIIR 8 /* MSIIR can index 8 MSI registers */
#define NR_MSI_REG_MSIIR1 16 /* MSIIR1 can index 16 MSI registers */
@@ -27,6 +28,8 @@
#define FSL_PIC_IP_IPIC 0x00000002
#define FSL_PIC_IP_VMPIC 0x00000003
+#define FSL_PIC_FTR_MPIC_4_3 0x00000010
+
struct fsl_msi_cascade_data;
struct fsl_msi {
@@ -37,6 +40,8 @@ struct fsl_msi {
u32 msiir_offset; /* Offset of MSIIR, relative to start of CCSR */
u32 ibs_shift; /* Shift of interrupt bit select */
u32 srs_shift; /* Shift of the shared interrupt register select */
+ u32 msir_num; /* Number of available MSIR regs */
+ atomic_t msi_alloc_cnt; /* Counter for MSI hwirq allocations */
void __iomem *msi_regs;
u32 feature;
struct fsl_msi_cascade_data *cascade_array[NR_MSI_REG_MAX];
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index 2ff630267e9e..8b7d8fc2b120 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -14,23 +14,28 @@
#include <asm/msi_bitmap.h>
#include <asm/setup.h>
-int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num)
+int msi_bitmap_alloc_hwirqs_from_offset(struct msi_bitmap *bmp, int offset,
+ int num)
{
unsigned long flags;
- int offset, order = get_count_order(num);
+ int index;
+ int order = get_count_order(num);
spin_lock_irqsave(&bmp->lock, flags);
- /*
- * This is fast, but stricter than we need. We might want to add
- * a fallback routine which does a linear search with no alignment.
- */
- offset = bitmap_find_free_region(bmp->bitmap, bmp->irq_count, order);
+ index = bitmap_find_next_zero_area(bmp->bitmap, bmp->irq_count,
+ offset, num, (1 << order) - 1);
+ bitmap_set(bmp->bitmap, index, num);
spin_unlock_irqrestore(&bmp->lock, flags);
- pr_debug("msi_bitmap: allocated 0x%x (2^%d) at offset 0x%x\n",
- num, order, offset);
+ pr_debug("msi_bitmap: found %d free bits starting from offset %d at index %d\n",
+ num, offset, index);
- return offset;
+ return index;
+}
+
+int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num)
+{
+ return msi_bitmap_alloc_hwirqs_from_offset(bmp, 0, num);
}
void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
OpenPOWER on IntegriCloud